author     cinap_lenrek <cinap_lenrek@localhost>    2011-05-03 11:25:13 +0000
committer  cinap_lenrek <cinap_lenrek@localhost>    2011-05-03 11:25:13 +0000
commit     458120dd40db6b4df55a4e96b650e16798ef06a0 (patch)
tree       8f82685be24fef97e715c6f5ca4c68d34d5074ee /sys/src/cmd/hg/hgext
parent     3a742c699f6806c1145aea5149bf15de15a0afd7 (diff)
add hg and python
Diffstat (limited to 'sys/src/cmd/hg/hgext')
-rw-r--r--  sys/src/cmd/hg/hgext/__init__.py                    1
-rw-r--r--  sys/src/cmd/hg/hgext/acl.py                       107
-rw-r--r--  sys/src/cmd/hg/hgext/bookmarks.py                 340
-rw-r--r--  sys/src/cmd/hg/hgext/bugzilla.py                  439
-rw-r--r--  sys/src/cmd/hg/hgext/children.py                   44
-rw-r--r--  sys/src/cmd/hg/hgext/churn.py                     174
-rw-r--r--  sys/src/cmd/hg/hgext/color.py                     286
-rw-r--r--  sys/src/cmd/hg/hgext/convert/__init__.py          296
-rw-r--r--  sys/src/cmd/hg/hgext/convert/bzr.py               259
-rw-r--r--  sys/src/cmd/hg/hgext/convert/common.py            389
-rw-r--r--  sys/src/cmd/hg/hgext/convert/convcmd.py           396
-rw-r--r--  sys/src/cmd/hg/hgext/convert/cvs.py               372
-rw-r--r--  sys/src/cmd/hg/hgext/convert/cvsps.py             831
-rw-r--r--  sys/src/cmd/hg/hgext/convert/darcs.py             135
-rw-r--r--  sys/src/cmd/hg/hgext/convert/filemap.py           359
-rw-r--r--  sys/src/cmd/hg/hgext/convert/git.py               152
-rw-r--r--  sys/src/cmd/hg/hgext/convert/gnuarch.py           342
-rw-r--r--  sys/src/cmd/hg/hgext/convert/hg.py                363
-rw-r--r--  sys/src/cmd/hg/hgext/convert/monotone.py          217
-rw-r--r--  sys/src/cmd/hg/hgext/convert/p4.py                205
-rw-r--r--  sys/src/cmd/hg/hgext/convert/subversion.py       1136
-rw-r--r--  sys/src/cmd/hg/hgext/convert/transport.py         128
-rw-r--r--  sys/src/cmd/hg/hgext/extdiff.py                   228
-rw-r--r--  sys/src/cmd/hg/hgext/fetch.py                     148
-rw-r--r--  sys/src/cmd/hg/hgext/gpg.py                       284
-rw-r--r--  sys/src/cmd/hg/hgext/graphlog.py                  378
-rw-r--r--  sys/src/cmd/hg/hgext/hgcia.py                     246
-rw-r--r--  sys/src/cmd/hg/hgext/hgk.py                       347
-rw-r--r--  sys/src/cmd/hg/hgext/highlight/__init__.py         60
-rw-r--r--  sys/src/cmd/hg/hgext/highlight/highlight.py        60
-rw-r--r--  sys/src/cmd/hg/hgext/inotify/__init__.py          109
-rw-r--r--  sys/src/cmd/hg/hgext/inotify/client.py            160
-rw-r--r--  sys/src/cmd/hg/hgext/inotify/common.py             51
-rw-r--r--  sys/src/cmd/hg/hgext/inotify/linux/__init__.py     41
-rw-r--r--  sys/src/cmd/hg/hgext/inotify/linux/_inotify.c     608
-rw-r--r--  sys/src/cmd/hg/hgext/inotify/linux/watcher.py     335
-rw-r--r--  sys/src/cmd/hg/hgext/inotify/server.py            874
-rw-r--r--  sys/src/cmd/hg/hgext/interhg.py                    80
-rw-r--r--  sys/src/cmd/hg/hgext/keyword.py                   555
-rw-r--r--  sys/src/cmd/hg/hgext/mq.py                       2653
-rw-r--r--  sys/src/cmd/hg/hgext/notify.py                    298
-rw-r--r--  sys/src/cmd/hg/hgext/pager.py                      64
-rw-r--r--  sys/src/cmd/hg/hgext/parentrevspec.py              96
-rw-r--r--  sys/src/cmd/hg/hgext/patchbomb.py                 513
-rw-r--r--  sys/src/cmd/hg/hgext/purge.py                     111
-rw-r--r--  sys/src/cmd/hg/hgext/rebase.py                    471
-rw-r--r--  sys/src/cmd/hg/hgext/record.py                    551
-rw-r--r--  sys/src/cmd/hg/hgext/share.py                      30
-rw-r--r--  sys/src/cmd/hg/hgext/transplant.py                606
-rw-r--r--  sys/src/cmd/hg/hgext/win32mbcs.py                 147
-rw-r--r--  sys/src/cmd/hg/hgext/win32text.py                 158
-rw-r--r--  sys/src/cmd/hg/hgext/zeroconf/Zeroconf.py        1573
-rw-r--r--  sys/src/cmd/hg/hgext/zeroconf/__init__.py         159
53 files changed, 18965 insertions, 0 deletions
diff --git a/sys/src/cmd/hg/hgext/__init__.py b/sys/src/cmd/hg/hgext/__init__.py
new file mode 100644
index 000000000..fdffa2a0f
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/__init__.py
@@ -0,0 +1 @@
+# placeholder
diff --git a/sys/src/cmd/hg/hgext/acl.py b/sys/src/cmd/hg/hgext/acl.py
new file mode 100644
index 000000000..f9b3927af
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/acl.py
@@ -0,0 +1,107 @@
+# acl.py - changeset access control for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+
+'''hooks for controlling repository access
+
+This hook makes it possible to allow or deny write access to portions
+of a repository when receiving incoming changesets.
+
+The authorization is matched based on the local user name on the
+system where the hook runs, and not the committer of the original
+changeset (since the latter is merely informative).
+
+The acl hook is best used along with a restricted shell like hgsh,
+preventing authenticated users from doing anything other than
+pushing or pulling. The hook is not safe to use if users have
+interactive shell access, as they can then disable the hook.
+Nor is it safe if remote users share an account, because then there
+is no way to distinguish them.
+
+To use this hook, configure the acl extension in your hgrc like this::
+
+ [extensions]
+ hgext.acl =
+
+ [hooks]
+ pretxnchangegroup.acl = python:hgext.acl.hook
+
+ [acl]
+ # Check whether the source of incoming changes is in this list
+ # ("serve" == ssh or http, "push", "pull", "bundle")
+ sources = serve
+
+The allow and deny sections take a subtree pattern as key (with a glob
+syntax by default), and a comma-separated list of users as the
+corresponding value. The deny list is checked before the allow list
+is. ::
+
+ [acl.allow]
+ # If acl.allow is not present, all users are allowed by default.
+ # An empty acl.allow section means no users allowed.
+ docs/** = doc_writer
+ .hgtags = release_engineer
+
+ [acl.deny]
+ # If acl.deny is not present, no users are refused by default.
+ # An empty acl.deny section means all users allowed.
+ glob pattern = user4, user5
+ ** = user6
+'''
+
+from mercurial.i18n import _
+from mercurial import util, match
+import getpass, urllib
+
+def buildmatch(ui, repo, user, key):
+    '''return a match function for key's rules, or None if not enabled.'''
+ if not ui.has_section(key):
+ ui.debug(_('acl: %s not enabled\n') % key)
+ return None
+
+ pats = [pat for pat, users in ui.configitems(key)
+ if user in users.replace(',', ' ').split()]
+ ui.debug(_('acl: %s enabled, %d entries for user %s\n') %
+ (key, len(pats), user))
+ if pats:
+ return match.match(repo.root, '', pats)
+ return match.exact(repo.root, '', [])
+
+
+def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
+ if hooktype != 'pretxnchangegroup':
+ raise util.Abort(_('config error - hook type "%s" cannot stop '
+ 'incoming changesets') % hooktype)
+ if source not in ui.config('acl', 'sources', 'serve').split():
+ ui.debug(_('acl: changes have source "%s" - skipping\n') % source)
+ return
+
+ user = None
+ if source == 'serve' and 'url' in kwargs:
+ url = kwargs['url'].split(':')
+ if url[0] == 'remote' and url[1].startswith('http'):
+ user = urllib.unquote(url[3])
+
+ if user is None:
+ user = getpass.getuser()
+
+ cfg = ui.config('acl', 'config')
+ if cfg:
+        ui.readconfig(cfg, sections=['acl.allow', 'acl.deny'])
+ allow = buildmatch(ui, repo, user, 'acl.allow')
+ deny = buildmatch(ui, repo, user, 'acl.deny')
+
+ for rev in xrange(repo[node], len(repo)):
+ ctx = repo[rev]
+ for f in ctx.files():
+ if deny and deny(f):
+ ui.debug(_('acl: user %s denied on %s\n') % (user, f))
+ raise util.Abort(_('acl: access denied for changeset %s') % ctx)
+ if allow and not allow(f):
+ ui.debug(_('acl: user %s not allowed on %s\n') % (user, f))
+ raise util.Abort(_('acl: access denied for changeset %s') % ctx)
+ ui.debug(_('acl: allowing changeset %s\n') % ctx)
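
The hook above checks every file touched by each incoming changeset,
consulting the deny list before the allow list. A minimal stdlib-only
sketch of that evaluation order, using fnmatch in place of Mercurial's
match objects (whose glob semantics differ slightly) and the rule sets
from the docstring example::

    import fnmatch

    def allowed(path, user, allow_rules, deny_rules):
        # deny is checked first, mirroring hook() above
        if deny_rules is not None:
            for pat, users in deny_rules:
                if user in users and fnmatch.fnmatch(path, pat):
                    return False
        if allow_rules is None:     # no acl.allow section: all users allowed
            return True
        for pat, users in allow_rules:
            if user in users and fnmatch.fnmatch(path, pat):
                return True
        return False                # empty or unmatched acl.allow denies

    deny = [('**', ['user6'])]
    allow = [('docs/**', ['doc_writer']), ('.hgtags', ['release_engineer'])]
    print allowed('docs/index.txt', 'doc_writer', allow, deny)  # True
    print allowed('.hgtags', 'doc_writer', allow, deny)         # False
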
diff --git a/sys/src/cmd/hg/hgext/bookmarks.py b/sys/src/cmd/hg/hgext/bookmarks.py
new file mode 100644
index 000000000..58aaec4fa
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/bookmarks.py
@@ -0,0 +1,340 @@
+# Mercurial extension to provide the 'hg bookmark' command
+#
+# Copyright 2008 David Soria Parra <dsp@php.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''track a line of development with movable markers
+
+Bookmarks are local movable markers to changesets. Every bookmark
+points to a changeset identified by its hash. If you commit a
+changeset that is based on a changeset that has a bookmark on it, the
+bookmark shifts to the new changeset.
+
+It is possible to use bookmark names in every revision lookup (e.g. hg
+merge, hg update).
+
+By default, when several bookmarks point to the same changeset, they
+will all move forward together. It is possible to obtain a more
+git-like experience by adding the following configuration option to
+your .hgrc::
+
+ [bookmarks]
+ track.current = True
+
+This will cause Mercurial to track the bookmark that you are currently
+using, and only update it. This is similar to git's approach to
+branching.
+'''
+
+from mercurial.i18n import _
+from mercurial.node import nullid, nullrev, hex, short
+from mercurial import util, commands, localrepo, repair, extensions
+import os
+
+def parse(repo):
+ '''Parse .hg/bookmarks file and return a dictionary
+
+ Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
+ in the .hg/bookmarks file. They are read by the parse() method and
+ returned as a dictionary with name => hash values.
+
+ The parsed dictionary is cached until a write() operation is done.
+ '''
+ try:
+ if repo._bookmarks:
+ return repo._bookmarks
+ repo._bookmarks = {}
+ for line in repo.opener('bookmarks'):
+ sha, refspec = line.strip().split(' ', 1)
+ repo._bookmarks[refspec] = repo.lookup(sha)
+ except:
+ pass
+ return repo._bookmarks
+
+def write(repo, refs):
+ '''Write bookmarks
+
+ Write the given bookmark => hash dictionary to the .hg/bookmarks file
+ in a format equal to those of localtags.
+
+ We also store a backup of the previous state in undo.bookmarks that
+ can be copied back on rollback.
+ '''
+ if os.path.exists(repo.join('bookmarks')):
+ util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
+ if current(repo) not in refs:
+ setcurrent(repo, None)
+ wlock = repo.wlock()
+ try:
+ file = repo.opener('bookmarks', 'w', atomictemp=True)
+ for refspec, node in refs.iteritems():
+ file.write("%s %s\n" % (hex(node), refspec))
+ file.rename()
+ finally:
+ wlock.release()
+
+def current(repo):
+ '''Get the current bookmark
+
+    If we use git-like branches we have a current bookmark that
+ we are on. This function returns the name of the bookmark. It
+ is stored in .hg/bookmarks.current
+ '''
+ if repo._bookmarkcurrent:
+ return repo._bookmarkcurrent
+ mark = None
+ if os.path.exists(repo.join('bookmarks.current')):
+ file = repo.opener('bookmarks.current')
+ # No readline() in posixfile_nt, reading everything is cheap
+ mark = (file.readlines() or [''])[0]
+ if mark == '':
+ mark = None
+ file.close()
+ repo._bookmarkcurrent = mark
+ return mark
+
+def setcurrent(repo, mark):
+ '''Set the name of the bookmark that we are currently on
+
+ Set the name of the bookmark that we are on (hg update <bookmark>).
+ The name is recorded in .hg/bookmarks.current
+ '''
+ if current(repo) == mark:
+ return
+
+ refs = parse(repo)
+
+    # do not update if we update to a rev equal to the current bookmark
+ if (mark and mark not in refs and
+ current(repo) and refs[current(repo)] == repo.changectx('.').node()):
+ return
+ if mark not in refs:
+ mark = ''
+ wlock = repo.wlock()
+ try:
+ file = repo.opener('bookmarks.current', 'w', atomictemp=True)
+ file.write(mark)
+ file.rename()
+ finally:
+ wlock.release()
+ repo._bookmarkcurrent = mark
+
+def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
+ '''track a line of development with movable markers
+
+ Bookmarks are pointers to certain commits that move when
+ committing. Bookmarks are local. They can be renamed, copied and
+ deleted. It is possible to use bookmark names in 'hg merge' and
+ 'hg update' to merge and update respectively to a given bookmark.
+
+ You can use 'hg bookmark NAME' to set a bookmark on the working
+ directory's parent revision with the given name. If you specify
+ a revision using -r REV (where REV may be an existing bookmark),
+ the bookmark is assigned to that revision.
+ '''
+ hexfn = ui.debugflag and hex or short
+ marks = parse(repo)
+ cur = repo.changectx('.').node()
+
+ if rename:
+ if rename not in marks:
+ raise util.Abort(_("a bookmark of this name does not exist"))
+ if mark in marks and not force:
+ raise util.Abort(_("a bookmark of the same name already exists"))
+ if mark is None:
+ raise util.Abort(_("new bookmark name required"))
+ marks[mark] = marks[rename]
+ del marks[rename]
+ if current(repo) == rename:
+ setcurrent(repo, mark)
+ write(repo, marks)
+ return
+
+ if delete:
+ if mark is None:
+ raise util.Abort(_("bookmark name required"))
+ if mark not in marks:
+ raise util.Abort(_("a bookmark of this name does not exist"))
+ if mark == current(repo):
+ setcurrent(repo, None)
+ del marks[mark]
+ write(repo, marks)
+ return
+
+    if mark is not None:
+ if "\n" in mark:
+ raise util.Abort(_("bookmark name cannot contain newlines"))
+ mark = mark.strip()
+ if mark in marks and not force:
+ raise util.Abort(_("a bookmark of the same name already exists"))
+ if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
+ and not force):
+ raise util.Abort(
+ _("a bookmark cannot have the name of an existing branch"))
+ if rev:
+ marks[mark] = repo.lookup(rev)
+ else:
+ marks[mark] = repo.changectx('.').node()
+ setcurrent(repo, mark)
+ write(repo, marks)
+ return
+
+ if mark is None:
+ if rev:
+ raise util.Abort(_("bookmark name required"))
+ if len(marks) == 0:
+ ui.status("no bookmarks set\n")
+ else:
+ for bmark, n in marks.iteritems():
+ if ui.configbool('bookmarks', 'track.current'):
+ prefix = (bmark == current(repo) and n == cur) and '*' or ' '
+ else:
+ prefix = (n == cur) and '*' or ' '
+
+ ui.write(" %s %-25s %d:%s\n" % (
+ prefix, bmark, repo.changelog.rev(n), hexfn(n)))
+ return
+
+def _revstostrip(changelog, node):
+ srev = changelog.rev(node)
+ tostrip = [srev]
+ saveheads = []
+ for r in xrange(srev, len(changelog)):
+ parents = changelog.parentrevs(r)
+ if parents[0] in tostrip or parents[1] in tostrip:
+ tostrip.append(r)
+ if parents[1] != nullrev:
+ for p in parents:
+ if p not in tostrip and p > srev:
+ saveheads.append(p)
+ return [r for r in tostrip if r not in saveheads]
+
+def strip(oldstrip, ui, repo, node, backup="all"):
+ """Strip bookmarks if revisions are stripped using
+ the mercurial.strip method. This usually happens during
+ qpush and qpop"""
+ revisions = _revstostrip(repo.changelog, node)
+ marks = parse(repo)
+ update = []
+ for mark, n in marks.iteritems():
+ if repo.changelog.rev(n) in revisions:
+ update.append(mark)
+ oldstrip(ui, repo, node, backup)
+ if len(update) > 0:
+ for m in update:
+ marks[m] = repo.changectx('.').node()
+ write(repo, marks)
+
+def reposetup(ui, repo):
+ if not isinstance(repo, localrepo.localrepository):
+ return
+
+    # init a bookmark cache, as otherwise we would get infinite recursion
+    # in lookup()
+ repo._bookmarks = None
+ repo._bookmarkcurrent = None
+
+ class bookmark_repo(repo.__class__):
+ def rollback(self):
+ if os.path.exists(self.join('undo.bookmarks')):
+ util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
+ return super(bookmark_repo, self).rollback()
+
+ def lookup(self, key):
+ if self._bookmarks is None:
+ self._bookmarks = parse(self)
+ if key in self._bookmarks:
+ key = self._bookmarks[key]
+ return super(bookmark_repo, self).lookup(key)
+
+ def commitctx(self, ctx, error=False):
+ """Add a revision to the repository and
+ move the bookmark"""
+ wlock = self.wlock() # do both commit and bookmark with lock held
+ try:
+ node = super(bookmark_repo, self).commitctx(ctx, error)
+ if node is None:
+ return None
+ parents = self.changelog.parents(node)
+ if parents[1] == nullid:
+ parents = (parents[0],)
+ marks = parse(self)
+ update = False
+ if ui.configbool('bookmarks', 'track.current'):
+ mark = current(self)
+ if mark and marks[mark] in parents:
+ marks[mark] = node
+ update = True
+ else:
+ for mark, n in marks.items():
+ if n in parents:
+ marks[mark] = node
+ update = True
+ if update:
+ write(self, marks)
+ return node
+ finally:
+ wlock.release()
+
+ def addchangegroup(self, source, srctype, url, emptyok=False):
+ parents = self.dirstate.parents()
+
+ result = super(bookmark_repo, self).addchangegroup(
+ source, srctype, url, emptyok)
+ if result > 1:
+ # We have more heads than before
+ return result
+ node = self.changelog.tip()
+ marks = parse(self)
+ update = False
+ if ui.configbool('bookmarks', 'track.current'):
+ mark = current(self)
+ if mark and marks[mark] in parents:
+ marks[mark] = node
+ update = True
+ else:
+ for mark, n in marks.items():
+ if n in parents:
+ marks[mark] = node
+ update = True
+ if update:
+ write(self, marks)
+ return result
+
+ def _findtags(self):
+ """Merge bookmarks with normal tags"""
+ (tags, tagtypes) = super(bookmark_repo, self)._findtags()
+ tags.update(parse(self))
+ return (tags, tagtypes)
+
+ repo.__class__ = bookmark_repo
+
+def uisetup(ui):
+ extensions.wrapfunction(repair, "strip", strip)
+ if ui.configbool('bookmarks', 'track.current'):
+ extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
+
+def updatecurbookmark(orig, ui, repo, *args, **opts):
+ '''Set the current bookmark
+
+ If the user updates to a bookmark we update the .hg/bookmarks.current
+ file.
+ '''
+ res = orig(ui, repo, *args, **opts)
+ rev = opts['rev']
+ if not rev and len(args) > 0:
+ rev = args[0]
+ setcurrent(repo, rev)
+ return res
+
+cmdtable = {
+ "bookmarks":
+ (bookmark,
+ [('f', 'force', False, _('force')),
+ ('r', 'rev', '', _('revision')),
+ ('d', 'delete', False, _('delete a given bookmark')),
+ ('m', 'rename', '', _('rename a given bookmark'))],
+ _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
+}
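
parse() and write() above round-trip .hg/bookmarks as one
'<hex node> <name>' pair per line (the localtags format), where the
name itself may contain spaces. A standalone sketch of the read side,
with invented hashes::

    def parse_bookmarks(data):
        # split on the first space only: bookmark names may contain spaces
        marks = {}
        for line in data.splitlines():
            if line.strip():
                sha, refspec = line.strip().split(' ', 1)
                marks[refspec] = sha
        return marks

    sample = ('0123456789abcdef0123456789abcdef01234567 stable\n'
              '89abcdef0123456789abcdef0123456789abcdef my feature\n')
    print parse_bookmarks(sample)['my feature'][:12]  # 89abcdef0123
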
diff --git a/sys/src/cmd/hg/hgext/bugzilla.py b/sys/src/cmd/hg/hgext/bugzilla.py
new file mode 100644
index 000000000..774ed3385
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/bugzilla.py
@@ -0,0 +1,439 @@
+# bugzilla.py - bugzilla integration for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''hooks for integrating with the Bugzilla bug tracker
+
+This hook extension adds comments on bugs in Bugzilla when changesets
+that refer to bugs by Bugzilla ID are seen. The hook does not change
+bug status.
+
+The hook updates the Bugzilla database directly. Only Bugzilla
+installations using MySQL are supported.
+
+The hook relies on a Bugzilla script to send bug change notification
+emails. That script changes between Bugzilla versions; the
+'processmail' script used prior to 2.18 is replaced in 2.18 and
+subsequent versions by 'contrib/sendbugmail.pl'. Note that these will
+be run by Mercurial as the user pushing the change; you will need to
+ensure the Bugzilla install file permissions are set appropriately.
+
+The extension is configured through three different configuration
+sections. These keys are recognized in the [bugzilla] section:
+
+host
+ Hostname of the MySQL server holding the Bugzilla database.
+
+db
+ Name of the Bugzilla database in MySQL. Default 'bugs'.
+
+user
+ Username to use to access MySQL server. Default 'bugs'.
+
+password
+ Password to use to access MySQL server.
+
+timeout
+ Database connection timeout (seconds). Default 5.
+
+version
+ Bugzilla version. Specify '3.0' for Bugzilla versions 3.0 and later,
+ '2.18' for Bugzilla versions from 2.18 and '2.16' for versions prior
+ to 2.18.
+
+bzuser
+ Fallback Bugzilla user name to record comments with, if changeset
+ committer cannot be found as a Bugzilla user.
+
+bzdir
+ Bugzilla install directory. Used by default notify. Default
+ '/var/www/html/bugzilla'.
+
+notify
+ The command to run to get Bugzilla to send bug change notification
+ emails. Substitutes from a map with 3 keys, 'bzdir', 'id' (bug id)
+ and 'user' (committer bugzilla email). Default depends on version;
+ from 2.18 it is "cd %(bzdir)s && perl -T contrib/sendbugmail.pl
+ %(id)s %(user)s".
+
+regexp
+ Regular expression to match bug IDs in changeset commit message.
+ Must contain one "()" group. The default expression matches 'Bug
+ 1234', 'Bug no. 1234', 'Bug number 1234', 'Bugs 1234,5678', 'Bug
+ 1234 and 5678' and variations thereof. Matching is case insensitive.
+
+style
+ The style file to use when formatting comments.
+
+template
+ Template to use when formatting comments. Overrides style if
+ specified. In addition to the usual Mercurial keywords, the
+ extension specifies::
+
+ {bug} The Bugzilla bug ID.
+ {root} The full pathname of the Mercurial repository.
+ {webroot} Stripped pathname of the Mercurial repository.
+ {hgweb} Base URL for browsing Mercurial repositories.
+
+ Default 'changeset {node|short} in repo {root} refers '
+ 'to bug {bug}.\\ndetails:\\n\\t{desc|tabindent}'
+
+strip
+ The number of slashes to strip from the front of {root} to produce
+ {webroot}. Default 0.
+
+usermap
+ Path of file containing Mercurial committer ID to Bugzilla user ID
+ mappings. If specified, the file should contain one mapping per
+ line, "committer"="Bugzilla user". See also the [usermap] section.
+
+The [usermap] section is used to specify mappings of Mercurial
+committer ID to Bugzilla user ID, one "committer"="Bugzilla user"
+entry per line. See also [bugzilla].usermap.
+
+Finally, the [web] section supports one entry:
+
+baseurl
+ Base URL for browsing Mercurial repositories. Reference from
+ templates as {hgweb}.
+
+Activating the extension::
+
+ [extensions]
+ hgext.bugzilla =
+
+ [hooks]
+ # run bugzilla hook on every change pulled or pushed in here
+ incoming.bugzilla = python:hgext.bugzilla.hook
+
+Example configuration:
+
+This example configuration is for a collection of Mercurial
+repositories in /var/local/hg/repos/ used with a local Bugzilla 3.2
+installation in /opt/bugzilla-3.2. ::
+
+ [bugzilla]
+ host=localhost
+ password=XYZZY
+ version=3.0
+ bzuser=unknown@domain.com
+ bzdir=/opt/bugzilla-3.2
+ template=Changeset {node|short} in {root|basename}.
+ {hgweb}/{webroot}/rev/{node|short}\\n
+ {desc}\\n
+ strip=5
+
+ [web]
+ baseurl=http://dev.domain.com/hg
+
+ [usermap]
+ user@emaildomain.com=user.name@bugzilladomain.com
+
+Commits add a comment to the Bugzilla bug record of the form::
+
+ Changeset 3b16791d6642 in repository-name.
+ http://dev.domain.com/hg/repository-name/rev/3b16791d6642
+
+ Changeset commit comment. Bug 1234.
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short
+from mercurial import cmdutil, templater, util
+import re, time
+
+MySQLdb = None
+
+def buglist(ids):
+ return '(' + ','.join(map(str, ids)) + ')'
+
+class bugzilla_2_16(object):
+ '''support for bugzilla version 2.16.'''
+
+ def __init__(self, ui):
+ self.ui = ui
+ host = self.ui.config('bugzilla', 'host', 'localhost')
+ user = self.ui.config('bugzilla', 'user', 'bugs')
+ passwd = self.ui.config('bugzilla', 'password')
+ db = self.ui.config('bugzilla', 'db', 'bugs')
+ timeout = int(self.ui.config('bugzilla', 'timeout', 5))
+ usermap = self.ui.config('bugzilla', 'usermap')
+ if usermap:
+ self.ui.readconfig(usermap, sections=['usermap'])
+ self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
+ (host, db, user, '*' * len(passwd)))
+ self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd,
+ db=db, connect_timeout=timeout)
+ self.cursor = self.conn.cursor()
+ self.longdesc_id = self.get_longdesc_id()
+ self.user_ids = {}
+ self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
+
+ def run(self, *args, **kwargs):
+ '''run a query.'''
+ self.ui.note(_('query: %s %s\n') % (args, kwargs))
+ try:
+ self.cursor.execute(*args, **kwargs)
+ except MySQLdb.MySQLError:
+ self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
+ raise
+
+ def get_longdesc_id(self):
+ '''get identity of longdesc field'''
+ self.run('select fieldid from fielddefs where name = "longdesc"')
+ ids = self.cursor.fetchall()
+ if len(ids) != 1:
+ raise util.Abort(_('unknown database schema'))
+ return ids[0][0]
+
+ def filter_real_bug_ids(self, ids):
+ '''filter not-existing bug ids from list.'''
+ self.run('select bug_id from bugs where bug_id in %s' % buglist(ids))
+ return sorted([c[0] for c in self.cursor.fetchall()])
+
+ def filter_unknown_bug_ids(self, node, ids):
+ '''filter bug ids from list that already refer to this changeset.'''
+
+ self.run('''select bug_id from longdescs where
+ bug_id in %s and thetext like "%%%s%%"''' %
+ (buglist(ids), short(node)))
+ unknown = set(ids)
+ for (id,) in self.cursor.fetchall():
+ self.ui.status(_('bug %d already knows about changeset %s\n') %
+ (id, short(node)))
+ unknown.discard(id)
+ return sorted(unknown)
+
+ def notify(self, ids, committer):
+ '''tell bugzilla to send mail.'''
+
+ self.ui.status(_('telling bugzilla to send mail:\n'))
+ (user, userid) = self.get_bugzilla_user(committer)
+ for id in ids:
+ self.ui.status(_(' bug %s\n') % id)
+ cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
+ bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
+ try:
+ # Backwards-compatible with old notify string, which
+ # took one string. This will throw with a new format
+ # string.
+ cmd = cmdfmt % id
+ except TypeError:
+ cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
+ self.ui.note(_('running notify command %s\n') % cmd)
+ fp = util.popen('(%s) 2>&1' % cmd)
+ out = fp.read()
+ ret = fp.close()
+ if ret:
+ self.ui.warn(out)
+ raise util.Abort(_('bugzilla notify command %s') %
+ util.explain_exit(ret)[0])
+ self.ui.status(_('done\n'))
+
+ def get_user_id(self, user):
+ '''look up numeric bugzilla user id.'''
+ try:
+ return self.user_ids[user]
+ except KeyError:
+ try:
+ userid = int(user)
+ except ValueError:
+ self.ui.note(_('looking up user %s\n') % user)
+ self.run('''select userid from profiles
+ where login_name like %s''', user)
+ all = self.cursor.fetchall()
+ if len(all) != 1:
+ raise KeyError(user)
+ userid = int(all[0][0])
+ self.user_ids[user] = userid
+ return userid
+
+ def map_committer(self, user):
+ '''map name of committer to bugzilla user name.'''
+ for committer, bzuser in self.ui.configitems('usermap'):
+ if committer.lower() == user.lower():
+ return bzuser
+ return user
+
+ def get_bugzilla_user(self, committer):
+ '''see if committer is a registered bugzilla user. Return
+ bugzilla username and userid if so. If not, return default
+ bugzilla username and userid.'''
+ user = self.map_committer(committer)
+ try:
+ userid = self.get_user_id(user)
+ except KeyError:
+ try:
+ defaultuser = self.ui.config('bugzilla', 'bzuser')
+ if not defaultuser:
+ raise util.Abort(_('cannot find bugzilla user id for %s') %
+ user)
+ userid = self.get_user_id(defaultuser)
+ user = defaultuser
+ except KeyError:
+ raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
+ (user, defaultuser))
+ return (user, userid)
+
+ def add_comment(self, bugid, text, committer):
+ '''add comment to bug. try adding comment as committer of
+ changeset, otherwise as default bugzilla user.'''
+ (user, userid) = self.get_bugzilla_user(committer)
+ now = time.strftime('%Y-%m-%d %H:%M:%S')
+ self.run('''insert into longdescs
+ (bug_id, who, bug_when, thetext)
+ values (%s, %s, %s, %s)''',
+ (bugid, userid, now, text))
+ self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
+ values (%s, %s, %s, %s)''',
+ (bugid, userid, now, self.longdesc_id))
+ self.conn.commit()
+
+class bugzilla_2_18(bugzilla_2_16):
+ '''support for bugzilla 2.18 series.'''
+
+ def __init__(self, ui):
+ bugzilla_2_16.__init__(self, ui)
+ self.default_notify = "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
+
+class bugzilla_3_0(bugzilla_2_18):
+ '''support for bugzilla 3.0 series.'''
+
+ def __init__(self, ui):
+ bugzilla_2_18.__init__(self, ui)
+
+ def get_longdesc_id(self):
+ '''get identity of longdesc field'''
+ self.run('select id from fielddefs where name = "longdesc"')
+ ids = self.cursor.fetchall()
+ if len(ids) != 1:
+ raise util.Abort(_('unknown database schema'))
+ return ids[0][0]
+
+class bugzilla(object):
+ # supported versions of bugzilla. different versions have
+ # different schemas.
+ _versions = {
+ '2.16': bugzilla_2_16,
+ '2.18': bugzilla_2_18,
+ '3.0': bugzilla_3_0
+ }
+
+ _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
+ r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)')
+
+ _bz = None
+
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.repo = repo
+
+ def bz(self):
+ '''return object that knows how to talk to bugzilla version in
+ use.'''
+
+ if bugzilla._bz is None:
+ bzversion = self.ui.config('bugzilla', 'version')
+ try:
+ bzclass = bugzilla._versions[bzversion]
+ except KeyError:
+ raise util.Abort(_('bugzilla version %s not supported') %
+ bzversion)
+ bugzilla._bz = bzclass(self.ui)
+ return bugzilla._bz
+
+ def __getattr__(self, key):
+ return getattr(self.bz(), key)
+
+ _bug_re = None
+ _split_re = None
+
+ def find_bug_ids(self, ctx):
+ '''find valid bug ids that are referred to in changeset
+ comments and that do not already have references to this
+ changeset.'''
+
+ if bugzilla._bug_re is None:
+ bugzilla._bug_re = re.compile(
+ self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re),
+ re.IGNORECASE)
+ bugzilla._split_re = re.compile(r'\D+')
+ start = 0
+ ids = set()
+ while True:
+ m = bugzilla._bug_re.search(ctx.description(), start)
+ if not m:
+ break
+ start = m.end()
+ for id in bugzilla._split_re.split(m.group(1)):
+ if not id: continue
+ ids.add(int(id))
+ if ids:
+ ids = self.filter_real_bug_ids(ids)
+ if ids:
+ ids = self.filter_unknown_bug_ids(ctx.node(), ids)
+ return ids
+
+ def update(self, bugid, ctx):
+ '''update bugzilla bug with reference to changeset.'''
+
+ def webroot(root):
+ '''strip leading prefix of repo root and turn into
+ url-safe path.'''
+ count = int(self.ui.config('bugzilla', 'strip', 0))
+ root = util.pconvert(root)
+ while count > 0:
+ c = root.find('/')
+ if c == -1:
+ break
+ root = root[c+1:]
+ count -= 1
+ return root
+
+ mapfile = self.ui.config('bugzilla', 'style')
+ tmpl = self.ui.config('bugzilla', 'template')
+ t = cmdutil.changeset_templater(self.ui, self.repo,
+ False, None, mapfile, False)
+ if not mapfile and not tmpl:
+ tmpl = _('changeset {node|short} in repo {root} refers '
+ 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
+ if tmpl:
+ tmpl = templater.parsestring(tmpl, quoted=False)
+ t.use_template(tmpl)
+ self.ui.pushbuffer()
+ t.show(ctx, changes=ctx.changeset(),
+ bug=str(bugid),
+ hgweb=self.ui.config('web', 'baseurl'),
+ root=self.repo.root,
+ webroot=webroot(self.repo.root))
+ data = self.ui.popbuffer()
+ self.add_comment(bugid, data, util.email(ctx.user()))
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+ '''add comment to bugzilla for each changeset that refers to a
+ bugzilla bug id. only add a comment once per bug, so same change
+ seen multiple times does not fill bug with duplicate data.'''
+ try:
+ import MySQLdb as mysql
+ global MySQLdb
+ MySQLdb = mysql
+ except ImportError, err:
+ raise util.Abort(_('python mysql support not available: %s') % err)
+
+ if node is None:
+ raise util.Abort(_('hook type %s does not pass a changeset id') %
+ hooktype)
+ try:
+ bz = bugzilla(ui, repo)
+ ctx = repo[node]
+ ids = bz.find_bug_ids(ctx)
+ if ids:
+ for id in ids:
+ bz.update(id, ctx)
+ bz.notify(ids, util.email(ctx.user()))
+ except MySQLdb.MySQLError, err:
+ raise util.Abort(_('database error: %s') % err[1])
+
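
Everything in find_bug_ids() above hinges on the default regular
expression; a quick standalone check of what it extracts from a commit
message (the bug IDs are invented)::

    import re

    bug_re = re.compile(r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
                        r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)', re.IGNORECASE)
    split_re = re.compile(r'\D+')

    desc = 'Fix overflow in the parser. Bugs 1234, 5678 and 9012 are affected.'
    m = bug_re.search(desc)
    ids = [int(i) for i in split_re.split(m.group(1)) if i]
    print ids  # [1234, 5678, 9012]
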
diff --git a/sys/src/cmd/hg/hgext/children.py b/sys/src/cmd/hg/hgext/children.py
new file mode 100644
index 000000000..35ddeca43
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/children.py
@@ -0,0 +1,44 @@
+# Mercurial extension to provide the 'hg children' command
+#
+# Copyright 2007 by Intevation GmbH <intevation@intevation.de>
+#
+# Author(s):
+# Thomas Arendsen Hein <thomas@intevation.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''command to display child changesets'''
+
+from mercurial import cmdutil
+from mercurial.commands import templateopts
+from mercurial.i18n import _
+
+
+def children(ui, repo, file_=None, **opts):
+ """show the children of the given or working directory revision
+
+    Print the children of the working directory's revision. If a
+    revision is given via -r/--rev, the children of that revision will
+    be printed. If a file argument is given, the revision in which the
+    file was last changed (after the working directory revision or the
+    argument to --rev if given) is printed.
+ """
+ rev = opts.get('rev')
+ if file_:
+ ctx = repo.filectx(file_, changeid=rev)
+ else:
+ ctx = repo[rev]
+
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ for cctx in ctx.children():
+ displayer.show(cctx)
+
+
+cmdtable = {
+ "children":
+ (children,
+ [('r', 'rev', '', _('show children of the specified revision')),
+ ] + templateopts,
+ _('hg children [-r REV] [FILE]')),
+}
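
Typical invocations, once "hgext.children =" is enabled under
[extensions] as with the other extensions in this patch (the revision
number and file name are illustrative)::

    hg children -r 42
    hg children -r 42 some/file.c

With the file argument, the children shown are those of the revision
in which the file last changed, per the docstring above.
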
diff --git a/sys/src/cmd/hg/hgext/churn.py b/sys/src/cmd/hg/hgext/churn.py
new file mode 100644
index 000000000..930009ab3
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/churn.py
@@ -0,0 +1,174 @@
+# churn.py - create a graph of revisions count grouped by template
+#
+# Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
+# Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''command to display statistics about repository history'''
+
+from mercurial.i18n import _
+from mercurial import patch, cmdutil, util, templater
+import sys, os
+import time, datetime
+
+def maketemplater(ui, repo, tmpl):
+ tmpl = templater.parsestring(tmpl, quoted=False)
+ try:
+ t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
+ except SyntaxError, inst:
+ raise util.Abort(inst.args[0])
+ t.use_template(tmpl)
+ return t
+
+def changedlines(ui, repo, ctx1, ctx2, fns):
+ lines = 0
+ fmatch = cmdutil.matchfiles(repo, fns)
+ diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
+ for l in diff.split('\n'):
+ if (l.startswith("+") and not l.startswith("+++ ") or
+ l.startswith("-") and not l.startswith("--- ")):
+ lines += 1
+ return lines
+
+def countrate(ui, repo, amap, *pats, **opts):
+ """Calculate stats"""
+ if opts.get('dateformat'):
+ def getkey(ctx):
+ t, tz = ctx.date()
+ date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
+ return date.strftime(opts['dateformat'])
+ else:
+ tmpl = opts.get('template', '{author|email}')
+ tmpl = maketemplater(ui, repo, tmpl)
+ def getkey(ctx):
+ ui.pushbuffer()
+ tmpl.show(ctx)
+ return ui.popbuffer()
+
+ count = pct = 0
+ rate = {}
+ df = False
+ if opts.get('date'):
+ df = util.matchdate(opts['date'])
+
+ get = util.cachefunc(lambda r: repo[r].changeset())
+ changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
+ for st, rev, fns in changeiter:
+        if st != 'add':
+ continue
+ if df and not df(get(rev)[2][0]): # doesn't match date format
+ continue
+
+ ctx = repo[rev]
+ key = getkey(ctx)
+ key = amap.get(key, key) # alias remap
+ if opts.get('changesets'):
+ rate[key] = rate.get(key, 0) + 1
+ else:
+ parents = ctx.parents()
+ if len(parents) > 1:
+ ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
+ continue
+
+ ctx1 = parents[0]
+ lines = changedlines(ui, repo, ctx1, ctx, fns)
+ rate[key] = rate.get(key, 0) + lines
+
+ if opts.get('progress'):
+ count += 1
+ newpct = int(100.0 * count / max(len(repo), 1))
+ if pct < newpct:
+ pct = newpct
+ ui.write("\r" + _("generating stats: %d%%") % pct)
+ sys.stdout.flush()
+
+ if opts.get('progress'):
+ ui.write("\r")
+ sys.stdout.flush()
+
+ return rate
+
+
+def churn(ui, repo, *pats, **opts):
+ '''histogram of changes to the repository
+
+ This command will display a histogram representing the number
+ of changed lines or revisions, grouped according to the given
+ template. The default template will group changes by author.
+ The --dateformat option may be used to group the results by
+ date instead.
+
+ Statistics are based on the number of changed lines, or
+ alternatively the number of matching revisions if the
+ --changesets option is specified.
+
+ Examples::
+
+ # display count of changed lines for every committer
+ hg churn -t '{author|email}'
+
+ # display daily activity graph
+ hg churn -f '%H' -s -c
+
+ # display activity of developers by month
+ hg churn -f '%Y-%m' -s -c
+
+ # display count of lines changed in every year
+ hg churn -f '%Y' -s
+
+ It is possible to map alternate email addresses to a main address
+ by providing a file using the following format::
+
+ <alias email> <actual email>
+
+ Such a file may be specified with the --aliases option, otherwise
+ a .hgchurn file will be looked for in the working directory root.
+ '''
+ def pad(s, l):
+ return (s + " " * l)[:l]
+
+ amap = {}
+ aliases = opts.get('aliases')
+ if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
+ aliases = repo.wjoin('.hgchurn')
+ if aliases:
+ for l in open(aliases, "r"):
+ l = l.strip()
+ alias, actual = l.split()
+ amap[alias] = actual
+
+ rate = countrate(ui, repo, amap, *pats, **opts).items()
+ if not rate:
+ return
+
+ sortkey = ((not opts.get('sort')) and (lambda x: -x[1]) or None)
+ rate.sort(key=sortkey)
+
+ maxcount = float(max([v for k, v in rate]))
+ maxname = max([len(k) for k, v in rate])
+
+ ttywidth = util.termwidth()
+ ui.debug(_("assuming %i character terminal\n") % ttywidth)
+ width = ttywidth - maxname - 2 - 6 - 2 - 2
+
+ for date, count in rate:
+ print "%s %6d %s" % (pad(date, maxname), count,
+ "*" * int(count * width / maxcount))
+
+
+cmdtable = {
+ "churn":
+ (churn,
+ [('r', 'rev', [], _('count rate for the specified revision or range')),
+ ('d', 'date', '', _('count rate for revisions matching date spec')),
+ ('t', 'template', '{author|email}', _('template to group changesets')),
+ ('f', 'dateformat', '',
+ _('strftime-compatible format for grouping by date')),
+ ('c', 'changesets', False, _('count rate by number of changesets')),
+ ('s', 'sort', False, _('sort by key (default: sort by count)')),
+ ('', 'aliases', '', _('file with email aliases')),
+ ('', 'progress', None, _('show progress'))],
+ _("hg churn [-d DATE] [-r REV] [--aliases FILE] [--progress] [FILE]")),
+}
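
The display loop at the end of churn() pads each key, prints a
six-digit count, and scales the bar to the remaining terminal width. A
tiny standalone rendering with invented counts, assuming an 80-column
terminal::

    rate = [('alice@example.com', 150), ('bob@example.com', 75)]
    maxcount = float(max(v for k, v in rate))
    maxname = max(len(k) for k, v in rate)
    width = 80 - maxname - 2 - 6 - 2 - 2   # same arithmetic as churn()

    def pad(s, l):
        return (s + " " * l)[:l]

    for key, count in rate:
        print "%s %6d %s" % (pad(key, maxname), count,
                             "*" * int(count * width / maxcount))
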
diff --git a/sys/src/cmd/hg/hgext/color.py b/sys/src/cmd/hg/hgext/color.py
new file mode 100644
index 000000000..4a736db43
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/color.py
@@ -0,0 +1,286 @@
+# color.py color output for the status and qseries commands
+#
+# Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+'''colorize output from some commands
+
+This extension modifies the status command to add color to its output
+to reflect file status, the qseries command to add color to reflect
+patch status (applied, unapplied, missing), and diff-related
+commands to highlight additions, removals, diff headers, and trailing
+whitespace.
+
+Other effects in addition to color, like bold and underlined text, are
+also available. Effects are rendered with the ECMA-48 SGR control
+function (aka ANSI escape codes). This module also provides the
+render_effects function, which can be used to add effects to any text.
+
+Default effects may be overridden from the .hgrc file::
+
+ [color]
+ status.modified = blue bold underline red_background
+ status.added = green bold
+ status.removed = red bold blue_background
+ status.deleted = cyan bold underline
+ status.unknown = magenta bold underline
+ status.ignored = black bold
+
+ # 'none' turns off all effects
+ status.clean = none
+ status.copied = none
+
+ qseries.applied = blue bold underline
+ qseries.unapplied = black bold
+ qseries.missing = red bold
+
+ diff.diffline = bold
+ diff.extended = cyan bold
+ diff.file_a = red bold
+ diff.file_b = green bold
+ diff.hunk = magenta
+ diff.deleted = red
+ diff.inserted = green
+ diff.changed = white
+ diff.trailingwhitespace = bold red_background
+'''
+
+import os, sys
+import itertools
+
+from mercurial import cmdutil, commands, extensions, error
+from mercurial.i18n import _
+
+# start and stop parameters for effects
+_effect_params = {'none': 0,
+ 'black': 30,
+ 'red': 31,
+ 'green': 32,
+ 'yellow': 33,
+ 'blue': 34,
+ 'magenta': 35,
+ 'cyan': 36,
+ 'white': 37,
+ 'bold': 1,
+ 'italic': 3,
+ 'underline': 4,
+ 'inverse': 7,
+ 'black_background': 40,
+ 'red_background': 41,
+ 'green_background': 42,
+ 'yellow_background': 43,
+ 'blue_background': 44,
+ 'purple_background': 45,
+ 'cyan_background': 46,
+ 'white_background': 47}
+
+def render_effects(text, effects):
+ 'Wrap text in commands to turn on each effect.'
+ start = [str(_effect_params[e]) for e in ['none'] + effects]
+ start = '\033[' + ';'.join(start) + 'm'
+ stop = '\033[' + str(_effect_params['none']) + 'm'
+ return ''.join([start, text, stop])
+
+def colorstatus(orig, ui, repo, *pats, **opts):
+ '''run the status command with colored output'''
+
+ delimiter = opts['print0'] and '\0' or '\n'
+
+ nostatus = opts.get('no_status')
+ opts['no_status'] = False
+ # run status and capture its output
+ ui.pushbuffer()
+ retval = orig(ui, repo, *pats, **opts)
+ # filter out empty strings
+    lines_with_status = [line for line in ui.popbuffer().split(delimiter) if line]
+
+ if nostatus:
+ lines = [l[2:] for l in lines_with_status]
+ else:
+ lines = lines_with_status
+
+ # apply color to output and display it
+ for i in xrange(len(lines)):
+ status = _status_abbreviations[lines_with_status[i][0]]
+ effects = _status_effects[status]
+ if effects:
+ lines[i] = render_effects(lines[i], effects)
+ ui.write(lines[i] + delimiter)
+ return retval
+
+_status_abbreviations = { 'M': 'modified',
+ 'A': 'added',
+ 'R': 'removed',
+ '!': 'deleted',
+ '?': 'unknown',
+ 'I': 'ignored',
+ 'C': 'clean',
+ ' ': 'copied', }
+
+_status_effects = { 'modified': ['blue', 'bold'],
+ 'added': ['green', 'bold'],
+ 'removed': ['red', 'bold'],
+ 'deleted': ['cyan', 'bold', 'underline'],
+ 'unknown': ['magenta', 'bold', 'underline'],
+ 'ignored': ['black', 'bold'],
+ 'clean': ['none'],
+ 'copied': ['none'], }
+
+def colorqseries(orig, ui, repo, *dummy, **opts):
+ '''run the qseries command with colored output'''
+ ui.pushbuffer()
+ retval = orig(ui, repo, **opts)
+ patchlines = ui.popbuffer().splitlines()
+ patchnames = repo.mq.series
+
+ for patch, patchname in itertools.izip(patchlines, patchnames):
+ if opts['missing']:
+ effects = _patch_effects['missing']
+ # Determine if patch is applied.
+ elif [ applied for applied in repo.mq.applied
+ if patchname == applied.name ]:
+ effects = _patch_effects['applied']
+ else:
+ effects = _patch_effects['unapplied']
+
+ patch = patch.replace(patchname, render_effects(patchname, effects), 1)
+ ui.write(patch + '\n')
+ return retval
+
+_patch_effects = { 'applied': ['blue', 'bold', 'underline'],
+ 'missing': ['red', 'bold'],
+ 'unapplied': ['black', 'bold'], }
+
+def colorwrap(orig, s):
+ '''wrap ui.write for colored diff output'''
+ lines = s.split('\n')
+ for i, line in enumerate(lines):
+ stripline = line
+ if line and line[0] in '+-':
+ # highlight trailing whitespace, but only in changed lines
+ stripline = line.rstrip()
+ for prefix, style in _diff_prefixes:
+ if stripline.startswith(prefix):
+ lines[i] = render_effects(stripline, _diff_effects[style])
+ break
+ if line != stripline:
+ lines[i] += render_effects(
+ line[len(stripline):], _diff_effects['trailingwhitespace'])
+ orig('\n'.join(lines))
+
+def colorshowpatch(orig, self, node):
+ '''wrap cmdutil.changeset_printer.showpatch with colored output'''
+ oldwrite = extensions.wrapfunction(self.ui, 'write', colorwrap)
+ try:
+ orig(self, node)
+ finally:
+ self.ui.write = oldwrite
+
+def colordiff(orig, ui, repo, *pats, **opts):
+ '''run the diff command with colored output'''
+ oldwrite = extensions.wrapfunction(ui, 'write', colorwrap)
+ try:
+ orig(ui, repo, *pats, **opts)
+ finally:
+ ui.write = oldwrite
+
+_diff_prefixes = [('diff', 'diffline'),
+ ('copy', 'extended'),
+ ('rename', 'extended'),
+ ('old', 'extended'),
+ ('new', 'extended'),
+ ('deleted', 'extended'),
+ ('---', 'file_a'),
+ ('+++', 'file_b'),
+ ('@', 'hunk'),
+ ('-', 'deleted'),
+ ('+', 'inserted')]
+
+_diff_effects = {'diffline': ['bold'],
+ 'extended': ['cyan', 'bold'],
+ 'file_a': ['red', 'bold'],
+ 'file_b': ['green', 'bold'],
+ 'hunk': ['magenta'],
+ 'deleted': ['red'],
+ 'inserted': ['green'],
+ 'changed': ['white'],
+ 'trailingwhitespace': ['bold', 'red_background']}
+
+_ui = None
+
+def uisetup(ui):
+ '''Initialize the extension.'''
+ global _ui
+ _ui = ui
+ _setupcmd(ui, 'diff', commands.table, colordiff, _diff_effects)
+ _setupcmd(ui, 'incoming', commands.table, None, _diff_effects)
+ _setupcmd(ui, 'log', commands.table, None, _diff_effects)
+ _setupcmd(ui, 'outgoing', commands.table, None, _diff_effects)
+ _setupcmd(ui, 'tip', commands.table, None, _diff_effects)
+ _setupcmd(ui, 'status', commands.table, colorstatus, _status_effects)
+
+def extsetup():
+ try:
+ mq = extensions.find('mq')
+ try:
+ # If we are loaded after mq, we must wrap commands.table
+ _setupcmd(_ui, 'qdiff', commands.table, colordiff, _diff_effects)
+ _setupcmd(_ui, 'qseries', commands.table, colorqseries, _patch_effects)
+ except error.UnknownCommand:
+ # Otherwise we wrap mq.cmdtable
+ _setupcmd(_ui, 'qdiff', mq.cmdtable, colordiff, _diff_effects)
+ _setupcmd(_ui, 'qseries', mq.cmdtable, colorqseries, _patch_effects)
+ except KeyError:
+ # The mq extension is not enabled
+ pass
+
+def _setupcmd(ui, cmd, table, func, effectsmap):
+ '''patch in command to command table and load effect map'''
+ def nocolor(orig, *args, **opts):
+
+ if (opts['no_color'] or opts['color'] == 'never' or
+ (opts['color'] == 'auto' and (os.environ.get('TERM') == 'dumb'
+ or not sys.__stdout__.isatty()))):
+ return orig(*args, **opts)
+
+ oldshowpatch = extensions.wrapfunction(cmdutil.changeset_printer,
+ 'showpatch', colorshowpatch)
+ try:
+ if func is not None:
+ return func(orig, *args, **opts)
+ return orig(*args, **opts)
+ finally:
+ cmdutil.changeset_printer.showpatch = oldshowpatch
+
+ entry = extensions.wrapcommand(table, cmd, nocolor)
+ entry[1].extend([
+ ('', 'color', 'auto', _("when to colorize (always, auto, or never)")),
+ ('', 'no-color', None, _("don't colorize output")),
+ ])
+
+ for status in effectsmap:
+ configkey = cmd + '.' + status
+ effects = ui.configlist('color', configkey)
+ if effects:
+ good = []
+ for e in effects:
+ if e in _effect_params:
+ good.append(e)
+ else:
+ ui.warn(_("ignoring unknown color/effect %r "
+ "(configured in color.%s)\n")
+ % (e, configkey))
+ effectsmap[status] = good
diff --git a/sys/src/cmd/hg/hgext/convert/__init__.py b/sys/src/cmd/hg/hgext/convert/__init__.py
new file mode 100644
index 000000000..2d04dc34a
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/__init__.py
@@ -0,0 +1,296 @@
+# convert.py Foreign SCM converter
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''import revisions from foreign VCS repositories into Mercurial'''
+
+import convcmd
+import cvsps
+import subversion
+from mercurial import commands
+from mercurial.i18n import _
+
+# Command definitions were moved elsewhere to ease the demandload job.
+
+def convert(ui, src, dest=None, revmapfile=None, **opts):
+ """convert a foreign SCM repository to a Mercurial one.
+
+ Accepted source formats [identifiers]:
+
+ - Mercurial [hg]
+ - CVS [cvs]
+ - Darcs [darcs]
+ - git [git]
+ - Subversion [svn]
+ - Monotone [mtn]
+ - GNU Arch [gnuarch]
+ - Bazaar [bzr]
+ - Perforce [p4]
+
+ Accepted destination formats [identifiers]:
+
+ - Mercurial [hg]
+ - Subversion [svn] (history on branches is not preserved)
+
+ If no revision is given, all revisions will be converted.
+ Otherwise, convert will only import up to the named revision
+ (given in a format understood by the source).
+
+ If no destination directory name is specified, it defaults to the
+ basename of the source with '-hg' appended. If the destination
+ repository doesn't exist, it will be created.
+
+ By default, all sources except Mercurial will use --branchsort.
+ Mercurial uses --sourcesort to preserve original revision numbers
+ order. Sort modes have the following effects:
+
+ --branchsort convert from parent to child revision when possible,
+ which means branches are usually converted one after
+ the other. It generates more compact repositories.
+
+ --datesort sort revisions by date. Converted repositories have
+ good-looking changelogs but are often an order of
+ magnitude larger than the same ones generated by
+ --branchsort.
+
+ --sourcesort try to preserve source revisions order, only
+ supported by Mercurial sources.
+
+ If <REVMAP> isn't given, it will be put in a default location
+ (<dest>/.hg/shamap by default). The <REVMAP> is a simple text file
+ that maps each source commit ID to the destination ID for that
+ revision, like so::
+
+ <source ID> <destination ID>
+
+ If the file doesn't exist, it's automatically created. It's
+ updated on each commit copied, so convert-repo can be interrupted
+ and can be run repeatedly to copy new commits.
+
+ The [username mapping] file is a simple text file that maps each
+ source commit author to a destination commit author. It is handy
+    for source SCMs that use unix logins to identify authors (e.g.
+    CVS). One line per author mapping, and the line format is:
+    srcauthor=whatever string you want
+
+ The filemap is a file that allows filtering and remapping of files
+ and directories. Comment lines start with '#'. Each line can
+ contain one of the following directives::
+
+ include path/to/file
+
+ exclude path/to/file
+
+ rename from/file to/file
+
+ The 'include' directive causes a file, or all files under a
+ directory, to be included in the destination repository, and the
+ exclusion of all other files and directories not explicitly
+ included. The 'exclude' directive causes files or directories to
+ be omitted. The 'rename' directive renames a file or directory. To
+ rename from a subdirectory into the root of the repository, use
+ '.' as the path to rename to.
+
+ The splicemap is a file that allows insertion of synthetic
+ history, letting you specify the parents of a revision. This is
+ useful if you want to e.g. give a Subversion merge two parents, or
+ graft two disconnected series of history together. Each entry
+ contains a key, followed by a space, followed by one or two
+ comma-separated values. The key is the revision ID in the source
+ revision control system whose parents should be modified (same
+ format as a key in .hg/shamap). The values are the revision IDs
+ (in either the source or destination revision control system) that
+ should be used as the new parents for that node.
+
+ The branchmap is a file that allows you to rename a branch when it is
+ being brought in from whatever external repository. When used in
+ conjunction with a splicemap, it allows for a powerful combination
+ to help fix even the most badly mismanaged repositories and turn them
+ into nicely structured Mercurial repositories. The branchmap contains
+ lines of the form "original_branch_name new_branch_name".
+ "original_branch_name" is the name of the branch in the source
+ repository, and "new_branch_name" is the name of the branch is the
+ destination repository. This can be used to (for instance) move code
+ in one repository from "default" to a named branch.
+
+ Mercurial Source
+ ----------------
+
+ --config convert.hg.ignoreerrors=False (boolean)
+ ignore integrity errors when reading. Use it to fix Mercurial
+ repositories with missing revlogs, by converting from and to
+ Mercurial.
+ --config convert.hg.saverev=False (boolean)
+ store original revision ID in changeset (forces target IDs to
+ change)
+ --config convert.hg.startrev=0 (hg revision identifier)
+ convert start revision and its descendants
+
+ CVS Source
+ ----------
+
+ CVS source will use a sandbox (i.e. a checked-out copy) from CVS
+ to indicate the starting point of what will be converted. Direct
+ access to the repository files is not needed, unless of course the
+ repository is :local:. The conversion uses the top level directory
+ in the sandbox to find the CVS repository, and then uses CVS rlog
+ commands to find files to convert. This means that unless a
+ filemap is given, all files under the starting directory will be
+ converted, and that any directory reorganization in the CVS
+ sandbox is ignored.
+
+ Because CVS does not have changesets, it is necessary to collect
+ individual commits to CVS and merge them into changesets. CVS
+ source uses its internal changeset merging code by default but can
+ be configured to call the external 'cvsps' program by setting::
+
+ --config convert.cvsps='cvsps -A -u --cvs-direct -q'
+
+ This option is deprecated and will be removed in Mercurial 1.4.
+
+ The options shown are the defaults.
+
+ Internal cvsps is selected by setting ::
+
+ --config convert.cvsps=builtin
+
+ and has a few more configurable options:
+
+ --config convert.cvsps.cache=True (boolean)
+ Set to False to disable remote log caching, for testing and
+ debugging purposes.
+ --config convert.cvsps.fuzz=60 (integer)
+ Specify the maximum time (in seconds) that is allowed between
+ commits with identical user and log message in a single
+ changeset. When very large files were checked in as part of a
+ changeset then the default may not be long enough.
+ --config convert.cvsps.mergeto='{{mergetobranch ([-\\w]+)}}'
+ Specify a regular expression to which commit log messages are
+ matched. If a match occurs, then the conversion process will
+ insert a dummy revision merging the branch on which this log
+ message occurs to the branch indicated in the regex.
+ --config convert.cvsps.mergefrom='{{mergefrombranch ([-\\w]+)}}'
+ Specify a regular expression to which commit log messages are
+ matched. If a match occurs, then the conversion process will
+ add the most recent revision on the branch indicated in the
+ regex as the second parent of the changeset.
+
+ The hgext/convert/cvsps wrapper script allows the builtin
+ changeset merging code to be run without doing a conversion. Its
+    parameters and output are similar to those of cvsps 2.1.
+
+ Subversion Source
+ -----------------
+
+ Subversion source detects classical trunk/branches/tags layouts.
+ By default, the supplied "svn://repo/path/" source URL is
+ converted as a single branch. If "svn://repo/path/trunk" exists it
+ replaces the default branch. If "svn://repo/path/branches" exists,
+ its subdirectories are listed as possible branches. If
+ "svn://repo/path/tags" exists, it is looked for tags referencing
+ converted branches. Default "trunk", "branches" and "tags" values
+    can be overridden with the following options. Set them to paths
+ relative to the source URL, or leave them blank to disable auto
+ detection.
+
+ --config convert.svn.branches=branches (directory name)
+ specify the directory containing branches
+ --config convert.svn.tags=tags (directory name)
+ specify the directory containing tags
+ --config convert.svn.trunk=trunk (directory name)
+ specify the name of the trunk branch
+
+    Source history can be retrieved starting at a specific revision,
+    instead of being converted in its entirety. Only single branch
+    conversions are supported.
+
+ --config convert.svn.startrev=0 (svn revision number)
+ specify start Subversion revision.
+
+ Perforce Source
+ ---------------
+
+ The Perforce (P4) importer can be given a p4 depot path or a
+ client specification as source. It will convert all files in the
+ source to a flat Mercurial repository, ignoring labels, branches
+    and integrations. Note that when a depot path is given you
+    should usually specify a target directory, because otherwise the
+ target may be named ...-hg.
+
+ It is possible to limit the amount of source history to be
+ converted by specifying an initial Perforce revision.
+
+ --config convert.p4.startrev=0 (perforce changelist number)
+ specify initial Perforce revision.
+
+ Mercurial Destination
+ ---------------------
+
+ --config convert.hg.clonebranches=False (boolean)
+ dispatch source branches in separate clones.
+ --config convert.hg.tagsbranch=default (branch name)
+        branch name for tag revisions
+ --config convert.hg.usebranchnames=True (boolean)
+ preserve branch names
+
+ """
+ return convcmd.convert(ui, src, dest, revmapfile, **opts)
+
+def debugsvnlog(ui, **opts):
+ return subversion.debugsvnlog(ui, **opts)
+
+def debugcvsps(ui, *args, **opts):
+ '''create changeset information from CVS
+
+ This command is intended as a debugging tool for the CVS to
+ Mercurial converter, and can be used as a direct replacement for
+ cvsps.
+
+    hg debugcvsps reads the CVS rlog for the current directory (or
+    any named directory) in the CVS repository, and converts the log
+    to a series of changesets based on matching commit log entries
+    and dates.'''
+ return cvsps.debugcvsps(ui, *args, **opts)
+
+commands.norepo += " convert debugsvnlog debugcvsps"
+
+cmdtable = {
+ "convert":
+ (convert,
+ [('A', 'authors', '', _('username mapping filename')),
+ ('d', 'dest-type', '', _('destination repository type')),
+ ('', 'filemap', '', _('remap file names using contents of file')),
+ ('r', 'rev', '', _('import up to target revision REV')),
+ ('s', 'source-type', '', _('source repository type')),
+ ('', 'splicemap', '', _('splice synthesized history into place')),
+ ('', 'branchmap', '', _('change branch names while converting')),
+ ('', 'branchsort', None, _('try to sort changesets by branches')),
+ ('', 'datesort', None, _('try to sort changesets by date')),
+ ('', 'sourcesort', None, _('preserve source changesets order'))],
+ _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
+ "debugsvnlog":
+ (debugsvnlog,
+ [],
+ 'hg debugsvnlog'),
+ "debugcvsps":
+ (debugcvsps,
+ [
+ # Main options shared with cvsps-2.1
+ ('b', 'branches', [], _('only return changes on specified branches')),
+ ('p', 'prefix', '', _('prefix to remove from file names')),
+ ('r', 'revisions', [], _('only return changes after or between specified tags')),
+ ('u', 'update-cache', None, _("update cvs log cache")),
+ ('x', 'new-cache', None, _("create new cvs log cache")),
+ ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
+ ('', 'root', '', _('specify cvsroot')),
+ # Options specific to builtin cvsps
+ ('', 'parents', '', _('show parent changesets')),
+ ('', 'ancestors', '', _('show current changeset in ancestor branches')),
+ # Options that are ignored for compatibility with cvsps-2.1
+ ('A', 'cvs-direct', None, _('ignored for compatibility')),
+ ],
+ _('hg debugcvsps [OPTION]... [PATH]...')),
+}
diff --git a/sys/src/cmd/hg/hgext/convert/bzr.py b/sys/src/cmd/hg/hgext/convert/bzr.py
new file mode 100644
index 000000000..6d2abe0bb
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/bzr.py
@@ -0,0 +1,259 @@
+# bzr.py - bzr support for the convert extension
+#
+# Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+# This module handles 'bzr', which was formerly known as Bazaar-NG;
+# it cannot access 'baz' repositories, but those were never widely used.
+
+import os
+from mercurial import demandimport
+# these do not work with demandimport, blacklist
+demandimport.ignore.extend([
+ 'bzrlib.transactions',
+ 'bzrlib.urlutils',
+ ])
+
+from mercurial.i18n import _
+from mercurial import util
+from common import NoRepo, commit, converter_source
+
+try:
+ # bazaar imports
+ from bzrlib import branch, revision, errors
+ from bzrlib.revisionspec import RevisionSpec
+except ImportError:
+ pass
+
+supportedkinds = ('file', 'symlink')
+
+class bzr_source(converter_source):
+ """Reads Bazaar repositories by using the Bazaar Python libraries"""
+
+ def __init__(self, ui, path, rev=None):
+ super(bzr_source, self).__init__(ui, path, rev=rev)
+
+ if not os.path.exists(os.path.join(path, '.bzr')):
+ raise NoRepo('%s does not look like a Bazaar repo' % path)
+
+ try:
+ # access bzrlib stuff
+ branch
+ except NameError:
+ raise NoRepo('Bazaar modules could not be loaded')
+
+ path = os.path.abspath(path)
+ self._checkrepotype(path)
+ self.branch = branch.Branch.open(path)
+ self.sourcerepo = self.branch.repository
+ self._parentids = {}
+
+ def _checkrepotype(self, path):
+        # Lightweight checkout detection is informational but probably
+        # fragile at the API level. It should not terminate the conversion.
+ try:
+ from bzrlib import bzrdir
+ dir = bzrdir.BzrDir.open_containing(path)[0]
+ try:
+ tree = dir.open_workingtree(recommend_upgrade=False)
+ branch = tree.branch
+ except (errors.NoWorkingTree, errors.NotLocalUrl), e:
+ tree = None
+ branch = dir.open_branch()
+ if (tree is not None and tree.bzrdir.root_transport.base !=
+ branch.bzrdir.root_transport.base):
+ self.ui.warn(_('warning: lightweight checkouts may cause '
+ 'conversion failures, try with a regular '
+ 'branch instead.\n'))
+ except:
+ self.ui.note(_('bzr source type could not be determined\n'))
+
+ def before(self):
+ """Before the conversion begins, acquire a read lock
+ for all the operations that might need it. Fortunately
+ read locks don't block other reads or writes to the
+ repository, so this shouldn't have any impact on the usage of
+ the source repository.
+
+ The alternative would be locking on every operation that
+ needs locks (there are currently two: getting the file and
+        getting the parent map) and releasing it immediately after,
+        but that approach can take up to 40% longer."""
+ self.sourcerepo.lock_read()
+
+ def after(self):
+ self.sourcerepo.unlock()
+
+ def getheads(self):
+ if not self.rev:
+ return [self.branch.last_revision()]
+ try:
+ r = RevisionSpec.from_string(self.rev)
+ info = r.in_history(self.branch)
+ except errors.BzrError:
+ raise util.Abort(_('%s is not a valid revision in current branch')
+ % self.rev)
+ return [info.rev_id]
+
+ def getfile(self, name, rev):
+ revtree = self.sourcerepo.revision_tree(rev)
+ fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
+ kind = None
+ if fileid is not None:
+ kind = revtree.kind(fileid)
+ if kind not in supportedkinds:
+ # the file is not available anymore - was deleted
+ raise IOError(_('%s is not available in %s anymore') %
+ (name, rev))
+ if kind == 'symlink':
+ target = revtree.get_symlink_target(fileid)
+ if target is None:
+ raise util.Abort(_('%s.%s symlink has no target')
+ % (name, rev))
+ return target
+ else:
+ sio = revtree.get_file(fileid)
+ return sio.read()
+
+ def getmode(self, name, rev):
+ return self._modecache[(name, rev)]
+
+ def getchanges(self, version):
+ # set up caches: modecache and revtree
+ self._modecache = {}
+ self._revtree = self.sourcerepo.revision_tree(version)
+ # get the parentids from the cache
+ parentids = self._parentids.pop(version)
+ # only diff against first parent id
+ prevtree = self.sourcerepo.revision_tree(parentids[0])
+ return self._gettreechanges(self._revtree, prevtree)
+
+ def getcommit(self, version):
+ rev = self.sourcerepo.get_revision(version)
+ # populate parent id cache
+ if not rev.parent_ids:
+ parents = []
+ self._parentids[version] = (revision.NULL_REVISION,)
+ else:
+ parents = self._filterghosts(rev.parent_ids)
+ self._parentids[version] = parents
+
+ return commit(parents=parents,
+ date='%d %d' % (rev.timestamp, -rev.timezone),
+ author=self.recode(rev.committer),
+ # bzr returns bytestrings or unicode, depending on the content
+ desc=self.recode(rev.message),
+ rev=version)
+
+ def gettags(self):
+ if not self.branch.supports_tags():
+ return {}
+ tagdict = self.branch.tags.get_tag_dict()
+ bytetags = {}
+ for name, rev in tagdict.iteritems():
+ bytetags[self.recode(name)] = rev
+ return bytetags
+
+ def getchangedfiles(self, rev, i):
+ self._modecache = {}
+ curtree = self.sourcerepo.revision_tree(rev)
+ if i is not None:
+ parentid = self._parentids[rev][i]
+ else:
+ # no parent id, get the empty revision
+ parentid = revision.NULL_REVISION
+
+ prevtree = self.sourcerepo.revision_tree(parentid)
+ changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
+ return changes
+
+ def _gettreechanges(self, current, origin):
+        revid = current._revision_id
+ changes = []
+ renames = {}
+ for (fileid, paths, changed_content, versioned, parent, name,
+ kind, executable) in current.iter_changes(origin):
+
+ if paths[0] == u'' or paths[1] == u'':
+ # ignore changes to tree root
+ continue
+
+ # bazaar tracks directories, mercurial does not, so
+ # we have to rename the directory contents
+ if kind[1] == 'directory':
+ if kind[0] not in (None, 'directory'):
+ # Replacing 'something' with a directory, record it
+ # so it can be removed.
+ changes.append((self.recode(paths[0]), revid))
+
+ if None not in paths and paths[0] != paths[1]:
+                    # neither an add nor a delete - a move
+ # rename all directory contents manually
+ subdir = origin.inventory.path2id(paths[0])
+ # get all child-entries of the directory
+ for name, entry in origin.inventory.iter_entries(subdir):
+ # hg does not track directory renames
+ if entry.kind == 'directory':
+ continue
+ frompath = self.recode(paths[0] + '/' + name)
+ topath = self.recode(paths[1] + '/' + name)
+ # register the files as changed
+ changes.append((frompath, revid))
+ changes.append((topath, revid))
+ # add to mode cache
+                        mode = ((entry.executable and 'x')
+                                or (entry.kind == 'symlink' and 'l')
+                                or '')
+ self._modecache[(topath, revid)] = mode
+ # register the change as move
+ renames[topath] = frompath
+
+                    # no further changes, go to the next change
+ continue
+
+ # we got unicode paths, need to convert them
+ path, topath = [self.recode(part) for part in paths]
+
+ if topath is None:
+ # file deleted
+ changes.append((path, revid))
+ continue
+
+ # renamed
+ if path and path != topath:
+ renames[topath] = path
+ changes.append((path, revid))
+
+ # populate the mode cache
+ kind, executable = [e[1] for e in (kind, executable)]
+ mode = ((executable and 'x') or (kind == 'symlink' and 'l')
+ or '')
+ self._modecache[(topath, revid)] = mode
+ changes.append((topath, revid))
+
+ return changes, renames
+
+ def _filterghosts(self, ids):
+ """Filters out ghost revisions which hg does not support, see
+ <http://bazaar-vcs.org/GhostRevision>
+ """
+ parentmap = self.sourcerepo.get_parent_map(ids)
+ parents = tuple([parent for parent in ids if parent in parentmap])
+ return parents
+
+ def recode(self, s, encoding=None):
+ """This version of recode tries to encode unicode to bytecode,
+ and preferably using the UTF-8 codec.
+ Other types than Unicode are silently returned, this is by
+ intention, e.g. the None-type is not going to be encoded but instead
+ just passed through
+ """
+ if not encoding:
+ encoding = self.encoding or 'utf-8'
+
+ if isinstance(s, unicode):
+ return s.encode(encoding)
+ else:
+ # leave it alone
+ return s
diff --git a/sys/src/cmd/hg/hgext/convert/common.py b/sys/src/cmd/hg/hgext/convert/common.py
new file mode 100644
index 000000000..0519d99a0
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/common.py
@@ -0,0 +1,389 @@
+# common.py - common code for the convert extension
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import base64, errno
+import os
+import cPickle as pickle
+from mercurial import util
+from mercurial.i18n import _
+
+def encodeargs(args):
+    def encodearg(s):
+        # base64.encodestring inserts newlines; strip them so the
+        # result is a single line that is safe to pass as a command
+        # line argument
+        lines = base64.encodestring(s)
+        lines = [l.splitlines()[0] for l in lines]
+        return ''.join(lines)
+
+ s = pickle.dumps(args)
+ return encodearg(s)
+
+def decodeargs(s):
+ s = base64.decodestring(s)
+ return pickle.loads(s)
+
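+def _encodeargs_example():
+    # Illustrative sketch, not part of the original extension: the two
+    # helpers above round-trip arbitrary picklable values through a
+    # single base64 line, which is how convert passes arguments to a
+    # child 'hg debugsvnlog' process.
+    args = ['svn://repo/path', 42]
+    assert decodeargs(encodeargs(args)) == args
+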
+class MissingTool(Exception): pass
+
+def checktool(exe, name=None, abort=True):
+ name = name or exe
+ if not util.find_exe(exe):
+ exc = abort and util.Abort or MissingTool
+ raise exc(_('cannot find required "%s" tool') % name)
+
+class NoRepo(Exception): pass
+
+SKIPREV = 'SKIP'
+
+class commit(object):
+ def __init__(self, author, date, desc, parents, branch=None, rev=None,
+ extra={}, sortkey=None):
+ self.author = author or 'unknown'
+ self.date = date or '0 0'
+ self.desc = desc
+ self.parents = parents
+ self.branch = branch
+ self.rev = rev
+ self.extra = extra
+ self.sortkey = sortkey
+
+class converter_source(object):
+ """Conversion source interface"""
+
+ def __init__(self, ui, path=None, rev=None):
+ """Initialize conversion source (or raise NoRepo("message")
+ exception if path is not a valid repository)"""
+ self.ui = ui
+ self.path = path
+ self.rev = rev
+
+ self.encoding = 'utf-8'
+
+ def before(self):
+ pass
+
+ def after(self):
+ pass
+
+ def setrevmap(self, revmap):
+ """set the map of already-converted revisions"""
+ pass
+
+ def getheads(self):
+ """Return a list of this repository's heads"""
+ raise NotImplementedError()
+
+ def getfile(self, name, rev):
+ """Return file contents as a string. rev is the identifier returned
+ by a previous call to getchanges(). Raise IOError to indicate that
+ name was deleted in rev.
+ """
+ raise NotImplementedError()
+
+ def getmode(self, name, rev):
+ """Return file mode, eg. '', 'x', or 'l'. rev is the identifier
+ returned by a previous call to getchanges().
+ """
+ raise NotImplementedError()
+
+ def getchanges(self, version):
+ """Returns a tuple of (files, copies).
+
+ files is a sorted list of (filename, id) tuples for all files
+ changed between version and its first parent returned by
+ getcommit(). id is the source revision id of the file.
+
+ copies is a dictionary of dest: source
+ """
+ raise NotImplementedError()
+
+ def getcommit(self, version):
+ """Return the commit object for version"""
+ raise NotImplementedError()
+
+ def gettags(self):
+ """Return the tags as a dictionary of name: revision
+
+ Tag names must be UTF-8 strings.
+ """
+ raise NotImplementedError()
+
+ def recode(self, s, encoding=None):
+ if not encoding:
+ encoding = self.encoding or 'utf-8'
+
+ if isinstance(s, unicode):
+ return s.encode("utf-8")
+ try:
+ return s.decode(encoding).encode("utf-8")
+ except:
+ try:
+ return s.decode("latin-1").encode("utf-8")
+ except:
+ return s.decode(encoding, "replace").encode("utf-8")
+
+ def getchangedfiles(self, rev, i):
+ """Return the files changed by rev compared to parent[i].
+
+ i is an index selecting one of the parents of rev. The return
+ value should be the list of files that are different in rev and
+ this parent.
+
+ If rev has no parents, i is None.
+
+ This function is only needed to support --filemap
+ """
+ raise NotImplementedError()
+
+ def converted(self, rev, sinkrev):
+ '''Notify the source that a revision has been converted.'''
+ pass
+
+ def hasnativeorder(self):
+ """Return true if this source has a meaningful, native revision
+        order. For instance, Mercurial revisions are stored sequentially
+ while there is no such global ordering with Darcs.
+ """
+ return False
+
+ def lookuprev(self, rev):
+ """If rev is a meaningful revision reference in source, return
+ the referenced identifier in the same format used by getcommit().
+        Return None otherwise.
+ """
+ return None
+
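+class _example_source(converter_source):
+    """Illustrative sketch only, not part of the original extension:
+    a minimal single-revision source showing the contract described
+    by the methods above."""
+
+    def getheads(self):
+        return ['r1']
+
+    def getfile(self, name, rev):
+        return 'hello\n'
+
+    def getmode(self, name, rev):
+        return ''
+
+    def getchanges(self, version):
+        # one changed file, no copies
+        return [('hello.txt', 'r1')], {}
+
+    def getcommit(self, version):
+        return commit(author='demo', date='0 0', desc='demo commit',
+                      parents=[], rev=version)
+
+    def gettags(self):
+        return {}
+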
+class converter_sink(object):
+ """Conversion sink (target) interface"""
+
+ def __init__(self, ui, path):
+ """Initialize conversion sink (or raise NoRepo("message")
+ exception if path is not a valid repository)
+
+        self.created is a list of paths to remove if a fatal error
+        occurs later"""
+ self.ui = ui
+ self.path = path
+ self.created = []
+
+ def getheads(self):
+ """Return a list of this repository's heads"""
+ raise NotImplementedError()
+
+ def revmapfile(self):
+ """Path to a file that will contain lines
+ source_rev_id sink_rev_id
+ mapping equivalent revision identifiers for each system."""
+ raise NotImplementedError()
+
+ def authorfile(self):
+ """Path to a file that will contain lines
+ srcauthor=dstauthor
+        mapping equivalent author identifiers for each system."""
+ return None
+
+ def putcommit(self, files, copies, parents, commit, source, revmap):
+ """Create a revision with all changed files listed in 'files'
+ and having listed parents. 'commit' is a commit object
+ containing at a minimum the author, date, and message for this
+ changeset. 'files' is a list of (path, version) tuples,
+ 'copies' is a dictionary mapping destinations to sources,
+ 'source' is the source repository, and 'revmap' is a mapfile
+ of source revisions to converted revisions. Only getfile(),
+ getmode(), and lookuprev() should be called on 'source'.
+
+ Note that the sink repository is not told to update itself to
+ a particular revision (or even what that revision would be)
+ before it receives the file data.
+ """
+ raise NotImplementedError()
+
+ def puttags(self, tags):
+ """Put tags into sink.
+
+        tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string.
+ """
+ raise NotImplementedError()
+
+ def setbranch(self, branch, pbranches):
+ """Set the current branch name. Called before the first putcommit
+ on the branch.
+ branch: branch name for subsequent commits
+ pbranches: (converted parent revision, parent branch) tuples"""
+ pass
+
+ def setfilemapmode(self, active):
+ """Tell the destination that we're using a filemap
+
+ Some converter_sources (svn in particular) can claim that a file
+ was changed in a revision, even if there was no change. This method
+ tells the destination that we're using a filemap and that it should
+ filter empty revisions.
+ """
+ pass
+
+ def before(self):
+ pass
+
+ def after(self):
+ pass
+
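+class _example_sink(converter_sink):
+    """Illustrative sketch only, not part of the original extension:
+    an in-memory sink recording commits, showing the minimal methods
+    a destination must provide."""
+
+    def __init__(self, ui, path):
+        super(_example_sink, self).__init__(ui, path)
+        self.commits = []
+
+    def getheads(self):
+        return []
+
+    def revmapfile(self):
+        # hypothetical location for the source->sink revision map
+        return os.path.join(self.path, 'demo.revmap')
+
+    def putcommit(self, files, copies, parents, commit, source, revmap):
+        self.commits.append(commit)
+        # return the sink-side identifier of the new revision
+        return 'sink%d' % len(self.commits)
+
+    def puttags(self, tags):
+        return None
+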
+
+class commandline(object):
+ def __init__(self, ui, command):
+ self.ui = ui
+ self.command = command
+
+ def prerun(self):
+ pass
+
+ def postrun(self):
+ pass
+
+ def _cmdline(self, cmd, *args, **kwargs):
+ cmdline = [self.command, cmd] + list(args)
+        for k, v in kwargs.iteritems():
+            if len(k) == 1:
+                cmdline.append('-' + k)
+            else:
+                cmdline.append('--' + k.replace('_', '-'))
+            try:
+                if len(k) == 1:
+                    cmdline.append('' + v)
+                else:
+                    cmdline[-1] += '=' + v
+            except TypeError:
+                # a None value leaves a bare flag with no argument
+                pass
+ cmdline = [util.shellquote(arg) for arg in cmdline]
+ if not self.ui.debugflag:
+ cmdline += ['2>', util.nulldev]
+ cmdline += ['<', util.nulldev]
+ cmdline = ' '.join(cmdline)
+ return cmdline
+
+ def _run(self, cmd, *args, **kwargs):
+ cmdline = self._cmdline(cmd, *args, **kwargs)
+ self.ui.debug(_('running: %s\n') % (cmdline,))
+ self.prerun()
+ try:
+ return util.popen(cmdline)
+ finally:
+ self.postrun()
+
+ def run(self, cmd, *args, **kwargs):
+ fp = self._run(cmd, *args, **kwargs)
+ output = fp.read()
+ self.ui.debug(output)
+ return output, fp.close()
+
+ def runlines(self, cmd, *args, **kwargs):
+ fp = self._run(cmd, *args, **kwargs)
+ output = fp.readlines()
+ self.ui.debug(''.join(output))
+ return output, fp.close()
+
+ def checkexit(self, status, output=''):
+ if status:
+ if output:
+ self.ui.warn(_('%s error:\n') % self.command)
+ self.ui.warn(output)
+ msg = util.explain_exit(status)[0]
+ raise util.Abort('%s %s' % (self.command, msg))
+
+ def run0(self, cmd, *args, **kwargs):
+ output, status = self.run(cmd, *args, **kwargs)
+ self.checkexit(status, output)
+ return output
+
+ def runlines0(self, cmd, *args, **kwargs):
+ output, status = self.runlines(cmd, *args, **kwargs)
+ self.checkexit(status, ''.join(output))
+ return output
+
+ def getargmax(self):
+ if '_argmax' in self.__dict__:
+ return self._argmax
+
+ # POSIX requires at least 4096 bytes for ARG_MAX
+ self._argmax = 4096
+ try:
+ self._argmax = os.sysconf("SC_ARG_MAX")
+ except:
+ pass
+
+ # Windows shells impose their own limits on command line length,
+ # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
+ # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
+ # details about cmd.exe limitations.
+
+ # Since ARG_MAX is for command line _and_ environment, lower our limit
+ # (and make happy Windows shells while doing this).
+
+ self._argmax = self._argmax/2 - 1
+ return self._argmax
+
+ def limit_arglist(self, arglist, cmd, *args, **kwargs):
+ limit = self.getargmax() - len(self._cmdline(cmd, *args, **kwargs))
+ bytes = 0
+ fl = []
+ for fn in arglist:
+ b = len(fn) + 3
+ if bytes + b < limit or len(fl) == 0:
+ fl.append(fn)
+ bytes += b
+ else:
+ yield fl
+ fl = [fn]
+ bytes = b
+ if fl:
+ yield fl
+
+ def xargs(self, arglist, cmd, *args, **kwargs):
+ for l in self.limit_arglist(arglist, cmd, *args, **kwargs):
+ self.run0(cmd, *(list(args) + l), **kwargs)
+
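+def _commandline_example(ui):
+    # Illustrative sketch, not part of the original extension:
+    # single-letter kwargs become short options ('-r 1.2'), longer
+    # names become '--long=value', and a None value leaves a bare
+    # flag; limit_arglist() splits long file lists so each run stays
+    # under the ARG_MAX estimate ('echo' stands in for a real tool).
+    cl = commandline(ui, 'echo')
+    ui.write(cl._cmdline('log', r='1.2', cvs_direct=None) + '\n')
+    files = ['file%d' % i for i in xrange(10000)]
+    for chunk in cl.limit_arglist(files, 'log'):
+        ui.write('%d files\n' % len(chunk))
+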
+class mapfile(dict):
+ def __init__(self, ui, path):
+ super(mapfile, self).__init__()
+ self.ui = ui
+ self.path = path
+ self.fp = None
+ self.order = []
+ self._read()
+
+ def _read(self):
+ if not self.path:
+ return
+ try:
+ fp = open(self.path, 'r')
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ return
+ for i, line in enumerate(fp):
+ try:
+ key, value = line[:-1].rsplit(' ', 1)
+ except ValueError:
+ raise util.Abort(_('syntax error in %s(%d): key/value pair expected')
+ % (self.path, i+1))
+ if key not in self:
+ self.order.append(key)
+ super(mapfile, self).__setitem__(key, value)
+ fp.close()
+
+ def __setitem__(self, key, value):
+ if self.fp is None:
+ try:
+ self.fp = open(self.path, 'a')
+ except IOError, err:
+ raise util.Abort(_('could not open map file %r: %s') %
+ (self.path, err.strerror))
+ self.fp.write('%s %s\n' % (key, value))
+ self.fp.flush()
+ super(mapfile, self).__setitem__(key, value)
+
+ def close(self):
+ if self.fp:
+ self.fp.close()
+ self.fp = None
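+
+def _mapfile_example(ui):
+    # Illustrative sketch, not part of the original extension: a
+    # mapfile is a dict whose insertions are appended to a
+    # 'source_rev sink_rev' text file, which is what lets an
+    # interrupted conversion resume where it left off.
+    m = mapfile(ui, '/tmp/demo.revmap')  # hypothetical path
+    m['srcrev1'] = 'sinkrev1'            # written through to disk
+    m.close()
+    assert mapfile(ui, '/tmp/demo.revmap')['srcrev1'] == 'sinkrev1'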
diff --git a/sys/src/cmd/hg/hgext/convert/convcmd.py b/sys/src/cmd/hg/hgext/convert/convcmd.py
new file mode 100644
index 000000000..50be03af0
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/convcmd.py
@@ -0,0 +1,396 @@
+# convcmd - convert extension commands definition
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from common import NoRepo, MissingTool, SKIPREV, mapfile
+from cvs import convert_cvs
+from darcs import darcs_source
+from git import convert_git
+from hg import mercurial_source, mercurial_sink
+from subversion import svn_source, svn_sink
+from monotone import monotone_source
+from gnuarch import gnuarch_source
+from bzr import bzr_source
+from p4 import p4_source
+import filemap
+
+import os, shutil
+from mercurial import hg, util, encoding
+from mercurial.i18n import _
+
+orig_encoding = 'ascii'
+
+def recode(s):
+ if isinstance(s, unicode):
+ return s.encode(orig_encoding, 'replace')
+ else:
+ return s.decode('utf-8').encode(orig_encoding, 'replace')
+
+source_converters = [
+ ('cvs', convert_cvs, 'branchsort'),
+ ('git', convert_git, 'branchsort'),
+ ('svn', svn_source, 'branchsort'),
+ ('hg', mercurial_source, 'sourcesort'),
+ ('darcs', darcs_source, 'branchsort'),
+ ('mtn', monotone_source, 'branchsort'),
+ ('gnuarch', gnuarch_source, 'branchsort'),
+ ('bzr', bzr_source, 'branchsort'),
+ ('p4', p4_source, 'branchsort'),
+ ]
+
+sink_converters = [
+ ('hg', mercurial_sink),
+ ('svn', svn_sink),
+ ]
+
+def convertsource(ui, path, type, rev):
+ exceptions = []
+ for name, source, sortmode in source_converters:
+ try:
+ if not type or name == type:
+ return source(ui, path, rev), sortmode
+ except (NoRepo, MissingTool), inst:
+ exceptions.append(inst)
+ if not ui.quiet:
+ for inst in exceptions:
+ ui.write("%s\n" % inst)
+ raise util.Abort(_('%s: missing or unsupported repository') % path)
+
+def convertsink(ui, path, type):
+ for name, sink in sink_converters:
+ try:
+ if not type or name == type:
+ return sink(ui, path)
+ except NoRepo, inst:
+ ui.note(_("convert: %s\n") % inst)
+ raise util.Abort(_('%s: unknown repository type') % path)
+
+class converter(object):
+ def __init__(self, ui, source, dest, revmapfile, opts):
+
+ self.source = source
+ self.dest = dest
+ self.ui = ui
+ self.opts = opts
+ self.commitcache = {}
+ self.authors = {}
+ self.authorfile = None
+
+ # Record converted revisions persistently: maps source revision
+ # ID to target revision ID (both strings). (This is how
+ # incremental conversions work.)
+ self.map = mapfile(ui, revmapfile)
+
+        # Read the destination author map first, if any
+ authorfile = self.dest.authorfile()
+ if authorfile and os.path.exists(authorfile):
+ self.readauthormap(authorfile)
+ # Extend/Override with new author map if necessary
+ if opts.get('authors'):
+ self.readauthormap(opts.get('authors'))
+ self.authorfile = self.dest.authorfile()
+
+        # splicemap entries map a source revision to one or more parent
+        # revisions (comma or space separated); branchmap renames source
+        # branches on the fly
+        self.splicemap = mapfile(ui, opts.get('splicemap'))
+        self.branchmap = mapfile(ui, opts.get('branchmap'))
+
+ def walktree(self, heads):
+ '''Return a mapping that identifies the uncommitted parents of every
+ uncommitted changeset.'''
+ visit = heads
+ known = set()
+ parents = {}
+ while visit:
+ n = visit.pop(0)
+ if n in known or n in self.map: continue
+ known.add(n)
+ commit = self.cachecommit(n)
+ parents[n] = []
+ for p in commit.parents:
+ parents[n].append(p)
+ visit.append(p)
+
+ return parents
+
+ def toposort(self, parents, sortmode):
+        '''Return an ordering such that every uncommitted changeset is
+        preceded by all its uncommitted ancestors.'''
+
+ def mapchildren(parents):
+ """Return a (children, roots) tuple where 'children' maps parent
+            revision identifiers to child ones, and 'roots' is the list of
+            revisions without parents. 'parents' must be a mapping of revision
+            identifiers to their parent ones.
+ """
+ visit = parents.keys()
+ seen = set()
+ children = {}
+ roots = []
+
+ while visit:
+ n = visit.pop(0)
+ if n in seen:
+ continue
+ seen.add(n)
+ # Ensure that nodes without parents are present in the
+ # 'children' mapping.
+ children.setdefault(n, [])
+ hasparent = False
+ for p in parents[n]:
+ if not p in self.map:
+ visit.append(p)
+ hasparent = True
+ children.setdefault(p, []).append(n)
+ if not hasparent:
+ roots.append(n)
+
+ return children, roots
+
+ # Sort functions are supposed to take a list of revisions which
+ # can be converted immediately and pick one
+
+ def makebranchsorter():
+ """If the previously converted revision has a child in the
+ eligible revisions list, pick it. Return the list head
+ otherwise. Branch sort attempts to minimize branch
+ switching, which is harmful for Mercurial backend
+ compression.
+ """
+ prev = [None]
+ def picknext(nodes):
+ next = nodes[0]
+ for n in nodes:
+ if prev[0] in parents[n]:
+ next = n
+ break
+ prev[0] = next
+ return next
+ return picknext
+
+ def makesourcesorter():
+ """Source specific sort."""
+ keyfn = lambda n: self.commitcache[n].sortkey
+ def picknext(nodes):
+ return sorted(nodes, key=keyfn)[0]
+ return picknext
+
+ def makedatesorter():
+ """Sort revisions by date."""
+ dates = {}
+ def getdate(n):
+ if n not in dates:
+ dates[n] = util.parsedate(self.commitcache[n].date)
+ return dates[n]
+
+ def picknext(nodes):
+ return min([(getdate(n), n) for n in nodes])[1]
+
+ return picknext
+
+ if sortmode == 'branchsort':
+ picknext = makebranchsorter()
+ elif sortmode == 'datesort':
+ picknext = makedatesorter()
+ elif sortmode == 'sourcesort':
+ picknext = makesourcesorter()
+ else:
+ raise util.Abort(_('unknown sort mode: %s') % sortmode)
+
+ children, actives = mapchildren(parents)
+
+ s = []
+ pendings = {}
+ while actives:
+ n = picknext(actives)
+ actives.remove(n)
+ s.append(n)
+
+ # Update dependents list
+ for c in children.get(n, []):
+ if c not in pendings:
+ pendings[c] = [p for p in parents[c] if p not in self.map]
+ try:
+ pendings[c].remove(n)
+ except ValueError:
+ raise util.Abort(_('cycle detected between %s and %s')
+ % (recode(c), recode(n)))
+ if not pendings[c]:
+ # Parents are converted, node is eligible
+ actives.insert(0, c)
+ pendings[c] = None
+
+ if len(s) != len(parents):
+ raise util.Abort(_("not all revisions were sorted"))
+
+ return s
+
+ def writeauthormap(self):
+ authorfile = self.authorfile
+ if authorfile:
+ self.ui.status(_('Writing author map file %s\n') % authorfile)
+ ofile = open(authorfile, 'w+')
+ for author in self.authors:
+ ofile.write("%s=%s\n" % (author, self.authors[author]))
+ ofile.close()
+
+ def readauthormap(self, authorfile):
+ afile = open(authorfile, 'r')
+ for line in afile:
+
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+
+ try:
+ srcauthor, dstauthor = line.split('=', 1)
+ except ValueError:
+ msg = _('Ignoring bad line in author map file %s: %s\n')
+ self.ui.warn(msg % (authorfile, line.rstrip()))
+ continue
+
+ srcauthor = srcauthor.strip()
+ dstauthor = dstauthor.strip()
+ if self.authors.get(srcauthor) in (None, dstauthor):
+ msg = _('mapping author %s to %s\n')
+ self.ui.debug(msg % (srcauthor, dstauthor))
+ self.authors[srcauthor] = dstauthor
+ continue
+
+ m = _('overriding mapping for author %s, was %s, will be %s\n')
+ self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
+
+ afile.close()
+
+ def cachecommit(self, rev):
+ commit = self.source.getcommit(rev)
+ commit.author = self.authors.get(commit.author, commit.author)
+ commit.branch = self.branchmap.get(commit.branch, commit.branch)
+ self.commitcache[rev] = commit
+ return commit
+
+ def copy(self, rev):
+ commit = self.commitcache[rev]
+
+ changes = self.source.getchanges(rev)
+ if isinstance(changes, basestring):
+ if changes == SKIPREV:
+ dest = SKIPREV
+ else:
+ dest = self.map[changes]
+ self.map[rev] = dest
+ return
+ files, copies = changes
+ pbranches = []
+ if commit.parents:
+ for prev in commit.parents:
+ if prev not in self.commitcache:
+ self.cachecommit(prev)
+ pbranches.append((self.map[prev],
+ self.commitcache[prev].branch))
+ self.dest.setbranch(commit.branch, pbranches)
+ try:
+ parents = self.splicemap[rev].replace(',', ' ').split()
+ self.ui.status(_('spliced in %s as parents of %s\n') %
+ (parents, rev))
+ parents = [self.map.get(p, p) for p in parents]
+ except KeyError:
+ parents = [b[0] for b in pbranches]
+ newnode = self.dest.putcommit(files, copies, parents, commit,
+ self.source, self.map)
+ self.source.converted(rev, newnode)
+ self.map[rev] = newnode
+
+ def convert(self, sortmode):
+ try:
+ self.source.before()
+ self.dest.before()
+ self.source.setrevmap(self.map)
+ self.ui.status(_("scanning source...\n"))
+ heads = self.source.getheads()
+ parents = self.walktree(heads)
+ self.ui.status(_("sorting...\n"))
+ t = self.toposort(parents, sortmode)
+ num = len(t)
+ c = None
+
+ self.ui.status(_("converting...\n"))
+ for c in t:
+ num -= 1
+ desc = self.commitcache[c].desc
+ if "\n" in desc:
+ desc = desc.splitlines()[0]
+                # convert log message to local encoding without using
+                # tolocal() because convert() has already set
+                # encoding.encoding to 'utf-8'
+ self.ui.status("%d %s\n" % (num, recode(desc)))
+ self.ui.note(_("source: %s\n") % recode(c))
+ self.copy(c)
+
+ tags = self.source.gettags()
+ ctags = {}
+ for k in tags:
+ v = tags[k]
+ if self.map.get(v, SKIPREV) != SKIPREV:
+ ctags[k] = self.map[v]
+
+ if c and ctags:
+ nrev = self.dest.puttags(ctags)
+ # write another hash correspondence to override the previous
+ # one so we don't end up with extra tag heads
+ if nrev:
+ self.map[c] = nrev
+
+ self.writeauthormap()
+ finally:
+ self.cleanup()
+
+ def cleanup(self):
+ try:
+ self.dest.after()
+ finally:
+ self.source.after()
+ self.map.close()
+
+def convert(ui, src, dest=None, revmapfile=None, **opts):
+ global orig_encoding
+ orig_encoding = encoding.encoding
+ encoding.encoding = 'UTF-8'
+
+ if not dest:
+ dest = hg.defaultdest(src) + "-hg"
+ ui.status(_("assuming destination %s\n") % dest)
+
+ destc = convertsink(ui, dest, opts.get('dest_type'))
+
+ try:
+ srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
+ opts.get('rev'))
+ except Exception:
+ for path in destc.created:
+ shutil.rmtree(path, True)
+ raise
+
+ sortmodes = ('branchsort', 'datesort', 'sourcesort')
+ sortmode = [m for m in sortmodes if opts.get(m)]
+ if len(sortmode) > 1:
+ raise util.Abort(_('more than one sort mode specified'))
+ sortmode = sortmode and sortmode[0] or defaultsort
+ if sortmode == 'sourcesort' and not srcc.hasnativeorder():
+ raise util.Abort(_('--sourcesort is not supported by this data source'))
+
+ fmap = opts.get('filemap')
+ if fmap:
+ srcc = filemap.filemap_source(ui, srcc, fmap)
+ destc.setfilemapmode(True)
+
+ if not revmapfile:
+ try:
+ revmapfile = destc.revmapfile()
+ except:
+            revmapfile = os.path.join(dest, "map")
+
+ c = converter(ui, srcc, destc, revmapfile, opts)
+ c.convert(sortmode)
+
diff --git a/sys/src/cmd/hg/hgext/convert/cvs.py b/sys/src/cmd/hg/hgext/convert/cvs.py
new file mode 100644
index 000000000..c215747be
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/cvs.py
@@ -0,0 +1,372 @@
+# cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os, locale, re, socket, errno
+from cStringIO import StringIO
+from mercurial import util
+from mercurial.i18n import _
+
+from common import NoRepo, commit, converter_source, checktool
+import cvsps
+
+class convert_cvs(converter_source):
+ def __init__(self, ui, path, rev=None):
+ super(convert_cvs, self).__init__(ui, path, rev=rev)
+
+ cvs = os.path.join(path, "CVS")
+ if not os.path.exists(cvs):
+ raise NoRepo("%s does not look like a CVS checkout" % path)
+
+ checktool('cvs')
+ self.cmd = ui.config('convert', 'cvsps', 'builtin')
+ cvspsexe = self.cmd.split(None, 1)[0]
+ self.builtin = cvspsexe == 'builtin'
+        if not self.builtin:
+            ui.warn(_('warning: support for external cvsps is deprecated and '
+                      'will be removed in Mercurial 1.4\n'))
+            checktool(cvspsexe)
+
+ self.changeset = None
+ self.files = {}
+ self.tags = {}
+ self.lastbranch = {}
+ self.parent = {}
+ self.socket = None
+ self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
+ self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
+ self.encoding = locale.getpreferredencoding()
+
+ self._connect()
+
+ def _parse(self):
+ if self.changeset is not None:
+ return
+ self.changeset = {}
+
+ maxrev = 0
+ cmd = self.cmd
+ if self.rev:
+ # TODO: handle tags
+ try:
+ # patchset number?
+ maxrev = int(self.rev)
+ except ValueError:
+ try:
+ # date
+ util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
+ cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
+ except util.Abort:
+ raise util.Abort(_('revision %s is not a patchset number or date') % self.rev)
+
+ d = os.getcwd()
+ try:
+ os.chdir(self.path)
+ id = None
+ state = 0
+ filerevids = {}
+
+ if self.builtin:
+ # builtin cvsps code
+ self.ui.status(_('using builtin cvsps\n'))
+
+ cache = 'update'
+ if not self.ui.configbool('convert', 'cvsps.cache', True):
+ cache = None
+ db = cvsps.createlog(self.ui, cache=cache)
+ db = cvsps.createchangeset(self.ui, db,
+ fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
+ mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
+ mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
+
+ for cs in db:
+                    if maxrev and cs.id > maxrev:
+ break
+ id = str(cs.id)
+ cs.author = self.recode(cs.author)
+ self.lastbranch[cs.branch] = id
+ cs.comment = self.recode(cs.comment)
+ date = util.datestr(cs.date)
+ self.tags.update(dict.fromkeys(cs.tags, id))
+
+ files = {}
+ for f in cs.entries:
+ files[f.file] = "%s%s" % ('.'.join([str(x) for x in f.revision]),
+ ['', '(DEAD)'][f.dead])
+
+ # add current commit to set
+ c = commit(author=cs.author, date=date,
+ parents=[str(p.id) for p in cs.parents],
+ desc=cs.comment, branch=cs.branch or '')
+ self.changeset[id] = c
+ self.files[id] = files
+ else:
+ # external cvsps
+ for l in util.popen(cmd):
+ if state == 0: # header
+ if l.startswith("PatchSet"):
+ id = l[9:-2]
+ if maxrev and int(id) > maxrev:
+ # ignore everything
+ state = 3
+ elif l.startswith("Date:"):
+ date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
+ date = util.datestr(date)
+ elif l.startswith("Branch:"):
+ branch = l[8:-1]
+ self.parent[id] = self.lastbranch.get(branch, 'bad')
+ self.lastbranch[branch] = id
+ elif l.startswith("Ancestor branch:"):
+ ancestor = l[17:-1]
+ # figure out the parent later
+ self.parent[id] = self.lastbranch[ancestor]
+ elif l.startswith("Author:"):
+ author = self.recode(l[8:-1])
+ elif l.startswith("Tag:") or l.startswith("Tags:"):
+ t = l[l.index(':')+1:]
+ t = [ut.strip() for ut in t.split(',')]
+ if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
+ self.tags.update(dict.fromkeys(t, id))
+ elif l.startswith("Log:"):
+ # switch to gathering log
+ state = 1
+ log = ""
+ elif state == 1: # log
+ if l == "Members: \n":
+ # switch to gathering members
+ files = {}
+ oldrevs = []
+ log = self.recode(log[:-1])
+ state = 2
+ else:
+ # gather log
+ log += l
+ elif state == 2: # members
+ if l == "\n": # start of next entry
+ state = 0
+ p = [self.parent[id]]
+ if id == "1":
+ p = []
+ if branch == "HEAD":
+ branch = ""
+ if branch:
+ latest = 0
+ # the last changeset that contains a base
+ # file is our parent
+ for r in oldrevs:
+ latest = max(filerevids.get(r, 0), latest)
+ if latest:
+ p = [latest]
+
+ # add current commit to set
+ c = commit(author=author, date=date, parents=p,
+ desc=log, branch=branch)
+ self.changeset[id] = c
+ self.files[id] = files
+ else:
+ colon = l.rfind(':')
+ file = l[1:colon]
+ rev = l[colon+1:-2]
+ oldrev, rev = rev.split("->")
+ files[file] = rev
+
+ # save some information for identifying branch points
+ oldrevs.append("%s:%s" % (oldrev, file))
+ filerevids["%s:%s" % (rev, file)] = id
+ elif state == 3:
+ # swallow all input
+ continue
+
+ self.heads = self.lastbranch.values()
+ finally:
+ os.chdir(d)
+
+ def _connect(self):
+ root = self.cvsroot
+ conntype = None
+ user, host = None, None
+ cmd = ['cvs', 'server']
+
+ self.ui.status(_("connecting to %s\n") % root)
+
+ if root.startswith(":pserver:"):
+ root = root[9:]
+ m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
+ root)
+ if m:
+ conntype = "pserver"
+ user, passw, serv, port, root = m.groups()
+ if not user:
+ user = "anonymous"
+ if not port:
+ port = 2401
+ else:
+ port = int(port)
+ format0 = ":pserver:%s@%s:%s" % (user, serv, root)
+ format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
+
+ if not passw:
+ passw = "A"
+ cvspass = os.path.expanduser("~/.cvspass")
+ try:
+ pf = open(cvspass)
+ for line in pf.read().splitlines():
+ part1, part2 = line.split(' ', 1)
+ if part1 == '/1':
+ # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
+ part1, part2 = part2.split(' ', 1)
+ format = format1
+ else:
+ # :pserver:user@example.com:/cvsroot/foo Ah<Z
+ format = format0
+ if part1 == format:
+ passw = part2
+ break
+ pf.close()
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ if not getattr(inst, 'filename', None):
+ inst.filename = cvspass
+ raise
+
+ sck = socket.socket()
+ sck.connect((serv, port))
+ sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
+ "END AUTH REQUEST", ""]))
+ if sck.recv(128) != "I LOVE YOU\n":
+ raise util.Abort(_("CVS pserver authentication failed"))
+
+ self.writep = self.readp = sck.makefile('r+')
+
+ if not conntype and root.startswith(":local:"):
+ conntype = "local"
+ root = root[7:]
+
+ if not conntype:
+ # :ext:user@host/home/user/path/to/cvsroot
+ if root.startswith(":ext:"):
+ root = root[5:]
+ m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
+            # Do not take a Windows path like "c:\foo\bar" for a connection string
+ if os.path.isdir(root) or not m:
+ conntype = "local"
+ else:
+ conntype = "rsh"
+ user, host, root = m.group(1), m.group(2), m.group(3)
+
+ if conntype != "pserver":
+ if conntype == "rsh":
+ rsh = os.environ.get("CVS_RSH") or "ssh"
+ if user:
+ cmd = [rsh, '-l', user, host] + cmd
+ else:
+ cmd = [rsh, host] + cmd
+
+ # popen2 does not support argument lists under Windows
+ cmd = [util.shellquote(arg) for arg in cmd]
+ cmd = util.quotecommand(' '.join(cmd))
+ self.writep, self.readp = util.popen2(cmd)
+
+ self.realroot = root
+
+ self.writep.write("Root %s\n" % root)
+ self.writep.write("Valid-responses ok error Valid-requests Mode"
+ " M Mbinary E Checked-in Created Updated"
+ " Merged Removed\n")
+ self.writep.write("valid-requests\n")
+ self.writep.flush()
+ r = self.readp.readline()
+ if not r.startswith("Valid-requests"):
+ raise util.Abort(_("unexpected response from CVS server "
+ "(expected \"Valid-requests\", but got %r)")
+ % r)
+ if "UseUnchanged" in r:
+ self.writep.write("UseUnchanged\n")
+ self.writep.flush()
+ r = self.readp.readline()
+
+ def getheads(self):
+ self._parse()
+ return self.heads
+
+ def _getfile(self, name, rev):
+
+ def chunkedread(fp, count):
+            # file-objects returned by socket.makefile() do not handle
+ # large read() requests very well.
+ chunksize = 65536
+ output = StringIO()
+ while count > 0:
+ data = fp.read(min(count, chunksize))
+ if not data:
+ raise util.Abort(_("%d bytes missing from remote file") % count)
+ count -= len(data)
+ output.write(data)
+ return output.getvalue()
+
+ if rev.endswith("(DEAD)"):
+ raise IOError
+
+ args = ("-N -P -kk -r %s --" % rev).split()
+ args.append(self.cvsrepo + '/' + name)
+ for x in args:
+ self.writep.write("Argument %s\n" % x)
+ self.writep.write("Directory .\n%s\nco\n" % self.realroot)
+ self.writep.flush()
+
+ data = ""
+ while 1:
+ line = self.readp.readline()
+ if line.startswith("Created ") or line.startswith("Updated "):
+ self.readp.readline() # path
+ self.readp.readline() # entries
+ mode = self.readp.readline()[:-1]
+ count = int(self.readp.readline()[:-1])
+ data = chunkedread(self.readp, count)
+ elif line.startswith(" "):
+ data += line[1:]
+ elif line.startswith("M "):
+ pass
+ elif line.startswith("Mbinary "):
+ count = int(self.readp.readline()[:-1])
+ data = chunkedread(self.readp, count)
+ else:
+ if line == "ok\n":
+ return (data, "x" in mode and "x" or "")
+ elif line.startswith("E "):
+ self.ui.warn(_("cvs server: %s\n") % line[2:])
+ elif line.startswith("Remove"):
+ self.readp.readline()
+ else:
+ raise util.Abort(_("unknown CVS response: %s") % line)
+
+ def getfile(self, file, rev):
+ self._parse()
+ data, mode = self._getfile(file, rev)
+ self.modecache[(file, rev)] = mode
+ return data
+
+ def getmode(self, file, rev):
+ return self.modecache[(file, rev)]
+
+ def getchanges(self, rev):
+ self._parse()
+ self.modecache = {}
+ return sorted(self.files[rev].iteritems()), {}
+
+ def getcommit(self, rev):
+ self._parse()
+ return self.changeset[rev]
+
+ def gettags(self):
+ self._parse()
+ return self.tags
+
+ def getchangedfiles(self, rev, i):
+ self._parse()
+ return sorted(self.files[rev])
diff --git a/sys/src/cmd/hg/hgext/convert/cvsps.py b/sys/src/cmd/hg/hgext/convert/cvsps.py
new file mode 100644
index 000000000..02db47e25
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/cvsps.py
@@ -0,0 +1,831 @@
+#
+# Mercurial built-in replacement for cvsps.
+#
+# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os
+import re
+import cPickle as pickle
+from mercurial import util
+from mercurial.i18n import _
+
+class logentry(object):
+ '''Class logentry has the following attributes:
+ .author - author name as CVS knows it
+ .branch - name of branch this revision is on
+ .branches - revision tuple of branches starting at this revision
+ .comment - commit message
+ .date - the commit date as a (time, tz) tuple
+ .dead - true if file revision is dead
+ .file - Name of file
+ .lines - a tuple (+lines, -lines) or None
+ .parent - Previous revision of this entry
+ .rcs - name of file as returned from CVS
+ .revision - revision number as tuple
+ .tags - list of tags on the file
+ .synthetic - is this a synthetic "file ... added on ..." revision?
+ .mergepoint- the branch that has been merged from
+ (if present in rlog output)
+ .branchpoints- the branches that start at the current entry
+ '''
+ def __init__(self, **entries):
+ self.__dict__.update(entries)
+
+ def __repr__(self):
+ return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
+ id(self),
+ self.file,
+ ".".join(map(str, self.revision)))
+
+class logerror(Exception):
+ pass
+
+def getrepopath(cvspath):
+ """Return the repository path from a CVS path.
+
+ >>> getrepopath('/foo/bar')
+ '/foo/bar'
+ >>> getrepopath('c:/foo/bar')
+ 'c:/foo/bar'
+ >>> getrepopath(':pserver:10/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:10c:/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:c:/foo/bar')
+ 'c:/foo/bar'
+ >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
+ '/foo/bar'
+ >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
+ 'c:/foo/bar'
+ """
+ # According to CVS manual, CVS paths are expressed like:
+ # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
+ #
+ # Unfortunately, Windows absolute paths start with a drive letter
+ # like 'c:' making it harder to parse. Here we assume that drive
+ # letters are only one character long and any CVS component before
+ # the repository path is at least 2 characters long, and use this
+ # to disambiguate.
+ parts = cvspath.split(':')
+ if len(parts) == 1:
+ return parts[0]
+    # There is an ambiguous case here if a port number is
+    # immediately followed by a Windows drive letter. We assume this
+    # never happens and decide it must be a CVS path component,
+    # therefore ignoring it.
+ if len(parts[-2]) > 1:
+ return parts[-1].lstrip('0123456789')
+ return parts[-2] + ':' + parts[-1]
+
+def createlog(ui, directory=None, root="", rlog=True, cache=None):
+ '''Collect the CVS rlog'''
+
+ # Because we store many duplicate commit log messages, reusing strings
+ # saves a lot of memory and pickle storage space.
+ _scache = {}
+ def scache(s):
+ "return a shared version of a string"
+ return _scache.setdefault(s, s)
+
+ ui.status(_('collecting CVS rlog\n'))
+
+ log = [] # list of logentry objects containing the CVS state
+
+    # patterns to match in CVS (r)log output, grouped by parser state
+ re_00 = re.compile('RCS file: (.+)$')
+ re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
+ re_02 = re.compile('cvs (r?log|server): (.+)\n$')
+ re_03 = re.compile("(Cannot access.+CVSROOT)|"
+ "(can't create temporary directory.+)$")
+ re_10 = re.compile('Working file: (.+)$')
+ re_20 = re.compile('symbolic names:')
+ re_30 = re.compile('\t(.+): ([\\d.]+)$')
+ re_31 = re.compile('----------------------------$')
+ re_32 = re.compile('======================================='
+ '======================================$')
+ re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
+ re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
+ r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
+ r'(.*mergepoint:\s+([^;]+);)?')
+ re_70 = re.compile('branches: (.+);$')
+
+ file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
+
+    prefix = ''   # leading path to strip off what we get from CVS
+
+ if directory is None:
+ # Current working directory
+
+ # Get the real directory in the repository
+ try:
+ prefix = open(os.path.join('CVS','Repository')).read().strip()
+ if prefix == ".":
+ prefix = ""
+ directory = prefix
+ except IOError:
+ raise logerror('Not a CVS sandbox')
+
+ if prefix and not prefix.endswith(os.sep):
+ prefix += os.sep
+
+ # Use the Root file in the sandbox, if it exists
+ try:
+ root = open(os.path.join('CVS','Root')).read().strip()
+ except IOError:
+ pass
+
+ if not root:
+ root = os.environ.get('CVSROOT', '')
+
+ # read log cache if one exists
+ oldlog = []
+ date = None
+
+ if cache:
+ cachedir = os.path.expanduser('~/.hg.cvsps')
+ if not os.path.exists(cachedir):
+ os.mkdir(cachedir)
+
+ # The cvsps cache pickle needs a uniquified name, based on the
+        # repository location. The address may have all sorts of nasties
+ # in it, slashes, colons and such. So here we take just the
+ # alphanumerics, concatenated in a way that does not mix up the
+ # various components, so that
+ # :pserver:user@server:/path
+ # and
+ # /pserver/user/server/path
+ # are mapped to different cache file names.
+ cachefile = root.split(":") + [directory, "cache"]
+ cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
+ cachefile = os.path.join(cachedir,
+ '.'.join([s for s in cachefile if s]))
+
+ if cache == 'update':
+ try:
+ ui.note(_('reading cvs log cache %s\n') % cachefile)
+ oldlog = pickle.load(open(cachefile))
+ ui.note(_('cache has %d log entries\n') % len(oldlog))
+ except Exception, e:
+ ui.note(_('error reading cache: %r\n') % e)
+
+ if oldlog:
+ date = oldlog[-1].date # last commit date as a (time,tz) tuple
+ date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
+
+ # build the CVS commandline
+ cmd = ['cvs', '-q']
+ if root:
+ cmd.append('-d%s' % root)
+ p = util.normpath(getrepopath(root))
+ if not p.endswith('/'):
+ p += '/'
+ prefix = p + util.normpath(prefix)
+ cmd.append(['log', 'rlog'][rlog])
+ if date:
+ # no space between option and date string
+ cmd.append('-d>%s' % date)
+ cmd.append(directory)
+
+ # state machine begins here
+ tags = {} # dictionary of revisions on current file with their tags
+ branchmap = {} # mapping between branch names and revision numbers
+ state = 0
+ store = False # set when a new record can be appended
+
+ cmd = [util.shellquote(arg) for arg in cmd]
+ ui.note(_("running %s\n") % (' '.join(cmd)))
+ ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
+
+ pfp = util.popen(' '.join(cmd))
+ peek = pfp.readline()
+ while True:
+ line = peek
+ if line == '':
+ break
+ peek = pfp.readline()
+ if line.endswith('\n'):
+ line = line[:-1]
+ #ui.debug('state=%d line=%r\n' % (state, line))
+
+ if state == 0:
+ # initial state, consume input until we see 'RCS file'
+ match = re_00.match(line)
+ if match:
+ rcs = match.group(1)
+ tags = {}
+ if rlog:
+ filename = util.normpath(rcs[:-2])
+ if filename.startswith(prefix):
+ filename = filename[len(prefix):]
+ if filename.startswith('/'):
+ filename = filename[1:]
+ if filename.startswith('Attic/'):
+ filename = filename[6:]
+ else:
+ filename = filename.replace('/Attic/', '/')
+ state = 2
+ continue
+ state = 1
+ continue
+ match = re_01.match(line)
+ if match:
+ raise Exception(match.group(1))
+ match = re_02.match(line)
+ if match:
+ raise Exception(match.group(2))
+ if re_03.match(line):
+ raise Exception(line)
+
+ elif state == 1:
+ # expect 'Working file' (only when using log instead of rlog)
+ match = re_10.match(line)
+ assert match, _('RCS file must be followed by working file')
+ filename = util.normpath(match.group(1))
+ state = 2
+
+ elif state == 2:
+ # expect 'symbolic names'
+ if re_20.match(line):
+ branchmap = {}
+ state = 3
+
+ elif state == 3:
+ # read the symbolic names and store as tags
+ match = re_30.match(line)
+ if match:
+ rev = [int(x) for x in match.group(2).split('.')]
+
+ # Convert magic branch number to an odd-numbered one
+ revn = len(rev)
+ if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
+ rev = rev[:-2] + rev[-1:]
+ rev = tuple(rev)
+
+ if rev not in tags:
+ tags[rev] = []
+ tags[rev].append(match.group(1))
+ branchmap[match.group(1)] = match.group(2)
+
+ elif re_31.match(line):
+ state = 5
+ elif re_32.match(line):
+ state = 0
+
+ elif state == 4:
+ # expecting '------' separator before first revision
+ if re_31.match(line):
+ state = 5
+ else:
+ assert not re_32.match(line), _('must have at least '
+ 'some revisions')
+
+ elif state == 5:
+ # expecting revision number and possibly (ignored) lock indication
+ # we create the logentry here from values stored in states 0 to 4,
+ # as this state is re-entered for subsequent revisions of a file.
+ match = re_50.match(line)
+ assert match, _('expected revision number')
+ e = logentry(rcs=scache(rcs), file=scache(filename),
+ revision=tuple([int(x) for x in match.group(1).split('.')]),
+ branches=[], parent=None,
+ synthetic=False)
+ state = 6
+
+ elif state == 6:
+ # expecting date, author, state, lines changed
+ match = re_60.match(line)
+ assert match, _('revision must be followed by date line')
+ d = match.group(1)
+ if d[2] == '/':
+ # Y2K
+ d = '19' + d
+
+ if len(d.split()) != 3:
+ # cvs log dates always in GMT
+ d = d + ' UTC'
+ e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
+ '%Y/%m/%d %H:%M:%S',
+ '%Y-%m-%d %H:%M:%S'])
+ e.author = scache(match.group(2))
+ e.dead = match.group(3).lower() == 'dead'
+
+ if match.group(5):
+ if match.group(6):
+ e.lines = (int(match.group(5)), int(match.group(6)))
+ else:
+ e.lines = (int(match.group(5)), 0)
+ elif match.group(6):
+ e.lines = (0, int(match.group(6)))
+ else:
+ e.lines = None
+
+ if match.group(7): # cvsnt mergepoint
+ myrev = match.group(8).split('.')
+ if len(myrev) == 2: # head
+ e.mergepoint = 'HEAD'
+ else:
+ myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
+ branches = [b for b in branchmap if branchmap[b] == myrev]
+                    assert len(branches) == 1, 'unknown branch: %s' % match.group(8)
+ e.mergepoint = branches[0]
+ else:
+ e.mergepoint = None
+ e.comment = []
+ state = 7
+
+ elif state == 7:
+ # read the revision numbers of branches that start at this revision
+ # or store the commit log message otherwise
+ m = re_70.match(line)
+ if m:
+ e.branches = [tuple([int(y) for y in x.strip().split('.')])
+ for x in m.group(1).split(';')]
+ state = 8
+ elif re_31.match(line) and re_50.match(peek):
+ state = 5
+ store = True
+ elif re_32.match(line):
+ state = 0
+ store = True
+ else:
+ e.comment.append(line)
+
+ elif state == 8:
+ # store commit log message
+ if re_31.match(line):
+ state = 5
+ store = True
+ elif re_32.match(line):
+ state = 0
+ store = True
+ else:
+ e.comment.append(line)
+
+ # When a file is added on a branch B1, CVS creates a synthetic
+ # dead trunk revision 1.1 so that the branch has a root.
+ # Likewise, if you merge such a file to a later branch B2 (one
+ # that already existed when the file was added on B1), CVS
+ # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
+ # these revisions now, but mark them synthetic so
+ # createchangeset() can take care of them.
+ if (store and
+ e.dead and
+ e.revision[-1] == 1 and # 1.1 or 1.1.x.1
+ len(e.comment) == 1 and
+ file_added_re.match(e.comment[0])):
+ ui.debug(_('found synthetic revision in %s: %r\n')
+ % (e.rcs, e.comment[0]))
+ e.synthetic = True
+
+ if store:
+ # clean up the results and save in the log.
+ store = False
+ e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
+ e.comment = scache('\n'.join(e.comment))
+
+ revn = len(e.revision)
+ if revn > 3 and (revn % 2) == 0:
+ e.branch = tags.get(e.revision[:-1], [None])[0]
+ else:
+ e.branch = None
+
+ # find the branches starting from this revision
+ branchpoints = set()
+ for branch, revision in branchmap.iteritems():
+ revparts = tuple([int(i) for i in revision.split('.')])
+ if revparts[-2] == 0 and revparts[-1] % 2 == 0:
+ # normal branch
+ if revparts[:-2] == e.revision:
+ branchpoints.add(branch)
+ elif revparts == (1,1,1): # vendor branch
+ if revparts in e.branches:
+ branchpoints.add(branch)
+ e.branchpoints = branchpoints
+
+ log.append(e)
+
+ if len(log) % 100 == 0:
+ ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
+
+ log.sort(key=lambda x: (x.rcs, x.revision))
+
+ # find parent revisions of individual files
+ versions = {}
+ for e in log:
+ branch = e.revision[:-1]
+ p = versions.get((e.rcs, branch), None)
+ if p is None:
+ p = e.revision[:-2]
+ e.parent = p
+ versions[(e.rcs, branch)] = e.revision
+
+ # update the log cache
+ if cache:
+ if log:
+ # join up the old and new logs
+ log.sort(key=lambda x: x.date)
+
+ if oldlog and oldlog[-1].date >= log[0].date:
+ raise logerror('Log cache overlaps with new log entries,'
+ ' re-run without cache.')
+
+ log = oldlog + log
+
+ # write the new cachefile
+ ui.note(_('writing cvs log cache %s\n') % cachefile)
+ pickle.dump(log, open(cachefile, 'w'))
+ else:
+ log = oldlog
+
+ ui.status(_('%d log entries\n') % len(log))
+
+ return log
+
+
+class changeset(object):
+ '''Class changeset has the following attributes:
+ .id - integer identifying this changeset (list index)
+ .author - author name as CVS knows it
+ .branch - name of branch this changeset is on, or None
+ .comment - commit message
+ .date - the commit date as a (time,tz) tuple
+ .entries - list of logentry objects in this changeset
+ .parents - list of one or two parent changesets
+ .tags - list of tags on this changeset
+ .synthetic - from synthetic revision "file ... added on branch ..."
+ .mergepoint- the branch that has been merged from
+ (if present in rlog output)
+ .branchpoints- the branches that start at the current entry
+ '''
+ def __init__(self, **entries):
+ self.__dict__.update(entries)
+
+ def __repr__(self):
+ return "<%s at 0x%x: %s>" % (self.__class__.__name__,
+ id(self),
+ getattr(self, 'id', "(no id)"))
+
+def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
+ '''Convert log into changesets.'''
+
+ ui.status(_('creating changesets\n'))
+
+ # Merge changesets
+
+ log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
+
+ changesets = []
+ files = set()
+ c = None
+ for i, e in enumerate(log):
+
+ # Check if log entry belongs to the current changeset or not.
+
+ # Since CVS is file centric, two different file revisions with
+ # different branchpoints should be treated as belonging to two
+ # different changesets (and the ordering is important and not
+ # honoured by cvsps at this point).
+ #
+ # Consider the following case:
+ # foo 1.1 branchpoints: [MYBRANCH]
+ # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
+ #
+ # Here foo is part of MYBRANCH only, not MYBRANCH2; a later
+ # version of foo may appear in MYBRANCH2, so foo should be the
+ # first changeset, bar the next one, and MYBRANCH and MYBRANCH2
+ # should both start off the bar changeset. No provisions are
+ # made to ensure that this is, in fact, what happens.
+ if not (c and
+ e.comment == c.comment and
+ e.author == c.author and
+ e.branch == c.branch and
+ (not hasattr(e, 'branchpoints') or
+ not hasattr (c, 'branchpoints') or
+ e.branchpoints == c.branchpoints) and
+ ((c.date[0] + c.date[1]) <=
+ (e.date[0] + e.date[1]) <=
+ (c.date[0] + c.date[1]) + fuzz) and
+ e.file not in files):
+ c = changeset(comment=e.comment, author=e.author,
+ branch=e.branch, date=e.date, entries=[],
+ mergepoint=getattr(e, 'mergepoint', None),
+ branchpoints=getattr(e, 'branchpoints', set()))
+ changesets.append(c)
+ files = set()
+ if len(changesets) % 100 == 0:
+ t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
+ ui.status(util.ellipsis(t, 80) + '\n')
+
+ c.entries.append(e)
+ files.add(e.file)
+ c.date = e.date # changeset date is date of latest commit in it
+
+ # Mark synthetic changesets
+
+ for c in changesets:
+ # Synthetic revisions always get their own changeset, because
+ # the log message includes the filename. E.g. if you add file3
+ # and file4 on a branch, you get four log entries and three
+ # changesets:
+ # "File file3 was added on branch ..." (synthetic, 1 entry)
+ # "File file4 was added on branch ..." (synthetic, 1 entry)
+ # "Add file3 and file4 to fix ..." (real, 2 entries)
+ # Hence the check for 1 entry here.
+ synth = getattr(c.entries[0], 'synthetic', None)
+ c.synthetic = (len(c.entries) == 1 and synth)
+
+ # Sort files in each changeset
+
+ for c in changesets:
+ def pathcompare(l, r):
+ 'Mimic cvsps sorting order'
+ l = l.split('/')
+ r = r.split('/')
+ nl = len(l)
+ nr = len(r)
+ n = min(nl, nr)
+ for i in range(n):
+ if i + 1 == nl and nl < nr:
+ return -1
+ elif i + 1 == nr and nl > nr:
+ return +1
+ elif l[i] < r[i]:
+ return -1
+ elif l[i] > r[i]:
+ return +1
+ return 0
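+ # e.g. pathcompare('ab', 'a/b') == -1: the path with fewer
+ # components sorts first, unlike a plain string comparison
+ # (which would put 'a/b' before 'ab').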
+ def entitycompare(l, r):
+ return pathcompare(l.file, r.file)
+
+ c.entries.sort(entitycompare)
+
+ # Sort changesets by date
+
+ def cscmp(l, r):
+ d = sum(l.date) - sum(r.date)
+ if d:
+ return d
+
+ # detect vendor branches and initial commits on a branch
+ le = {}
+ for e in l.entries:
+ le[e.rcs] = e.revision
+ re = {}
+ for e in r.entries:
+ re[e.rcs] = e.revision
+
+ d = 0
+ for e in l.entries:
+ if re.get(e.rcs, None) == e.parent:
+ assert not d
+ d = 1
+ break
+
+ for e in r.entries:
+ if le.get(e.rcs, None) == e.parent:
+ assert not d
+ d = -1
+ break
+
+ return d
+
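+ # For equal dates, cscmp returns 1 when l's entries descend from
+ # r's entries (l is the child and sorts after r), and -1 in the
+ # opposite case.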
+ changesets.sort(cscmp)
+
+ # Collect tags
+
+ globaltags = {}
+ for c in changesets:
+ for e in c.entries:
+ for tag in e.tags:
+ # remember which is the latest changeset to have this tag
+ globaltags[tag] = c
+
+ for c in changesets:
+ tags = set()
+ for e in c.entries:
+ tags.update(e.tags)
+ # remember tags only if this is the latest changeset to have it
+ c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
+
+ # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
+ # by inserting dummy changesets with two parents, and handle
+ # {{mergefrombranch BRANCHNAME}} by setting two parents.
+
+ if mergeto is None:
+ mergeto = r'{{mergetobranch ([-\w]+)}}'
+ if mergeto:
+ mergeto = re.compile(mergeto)
+
+ if mergefrom is None:
+ mergefrom = r'{{mergefrombranch ([-\w]+)}}'
+ if mergefrom:
+ mergefrom = re.compile(mergefrom)
+
+ versions = {} # changeset index where we saw any particular file version
+ branches = {} # changeset index where we saw a branch
+ n = len(changesets)
+ i = 0
+ while i<n:
+ c = changesets[i]
+
+ for f in c.entries:
+ versions[(f.rcs, f.revision)] = i
+
+ p = None
+ if c.branch in branches:
+ p = branches[c.branch]
+ else:
+ # first changeset on a new branch
+ # the parent is a changeset with the branch in its
+ # branchpoints such that it is the latest possible
+ # commit without any intervening, unrelated commits.
+
+ for candidate in xrange(i):
+ if c.branch not in changesets[candidate].branchpoints:
+ if p is not None:
+ break
+ continue
+ p = candidate
+
+ c.parents = []
+ if p is not None:
+ p = changesets[p]
+
+ # Ensure no changeset has a synthetic changeset as a parent.
+ while p.synthetic:
+ assert len(p.parents) <= 1, \
+ _('synthetic changeset cannot have multiple parents')
+ if p.parents:
+ p = p.parents[0]
+ else:
+ p = None
+ break
+
+ if p is not None:
+ c.parents.append(p)
+
+ if c.mergepoint:
+ if c.mergepoint == 'HEAD':
+ c.mergepoint = None
+ c.parents.append(changesets[branches[c.mergepoint]])
+
+ if mergefrom:
+ m = mergefrom.search(c.comment)
+ if m:
+ m = m.group(1)
+ if m == 'HEAD':
+ m = None
+ try:
+ candidate = changesets[branches[m]]
+ except KeyError:
+ ui.warn(_("warning: CVS commit message references "
+ "non-existent branch %r:\n%s\n")
+ % (m, c.comment))
+ if m in branches and c.branch != m and not candidate.synthetic:
+ c.parents.append(candidate)
+
+ if mergeto:
+ m = mergeto.search(c.comment)
+ if m:
+ try:
+ m = m.group(1)
+ if m == 'HEAD':
+ m = None
+ except:
+ m = None # if no group found then merge to HEAD
+ if m in branches and c.branch != m:
+ # insert empty changeset for merge
+ cc = changeset(author=c.author, branch=m, date=c.date,
+ comment='convert-repo: CVS merge from branch %s' % c.branch,
+ entries=[], tags=[], parents=[changesets[branches[m]], c])
+ changesets.insert(i + 1, cc)
+ branches[m] = i + 1
+
+ # adjust our loop counters now we have inserted a new entry
+ n += 1
+ i += 2
+ continue
+
+ branches[c.branch] = i
+ i += 1
+
+ # Drop synthetic changesets (safe now that we have ensured no other
+ # changesets can have them as parents).
+ i = 0
+ while i < len(changesets):
+ if changesets[i].synthetic:
+ del changesets[i]
+ else:
+ i += 1
+
+ # Number changesets
+
+ for i, c in enumerate(changesets):
+ c.id = i + 1
+
+ ui.status(_('%d changeset entries\n') % len(changesets))
+
+ return changesets
+
+
+def debugcvsps(ui, *args, **opts):
+ '''Read CVS rlog for current directory or named path in
+ repository, and convert the log to changesets based on matching
+ commit log entries and dates.
+ '''
+ if opts["new_cache"]:
+ cache = "write"
+ elif opts["update_cache"]:
+ cache = "update"
+ else:
+ cache = None
+
+ revisions = opts["revisions"]
+
+ try:
+ if args:
+ log = []
+ for d in args:
+ log += createlog(ui, d, root=opts["root"], cache=cache)
+ else:
+ log = createlog(ui, root=opts["root"], cache=cache)
+ except logerror, e:
+ ui.write("%r\n"%e)
+ return
+
+ changesets = createchangeset(ui, log, opts["fuzz"])
+ del log
+
+ # Print changesets (optionally filtered)
+
+ off = len(revisions)
+ branches = {} # latest version number in each branch
+ ancestors = {} # parent branch
+ for cs in changesets:
+
+ if opts["ancestors"]:
+ if cs.branch not in branches and cs.parents and cs.parents[0].id:
+ ancestors[cs.branch] = (changesets[cs.parents[0].id-1].branch,
+ cs.parents[0].id)
+ branches[cs.branch] = cs.id
+
+ # limit by branches
+ if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
+ continue
+
+ if not off:
+ # Note: trailing spaces on several lines here are needed to have
+ # bug-for-bug compatibility with cvsps.
+ ui.write('---------------------\n')
+ ui.write('PatchSet %d \n' % cs.id)
+ ui.write('Date: %s\n' % util.datestr(cs.date,
+ '%Y/%m/%d %H:%M:%S %1%2'))
+ ui.write('Author: %s\n' % cs.author)
+ ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
+ ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
+ ','.join(cs.tags) or '(none)'))
+ branchpoints = getattr(cs, 'branchpoints', None)
+ if branchpoints:
+ ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
+ if opts["parents"] and cs.parents:
+ if len(cs.parents)>1:
+ ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
+ else:
+ ui.write('Parent: %d\n' % cs.parents[0].id)
+
+ if opts["ancestors"]:
+ b = cs.branch
+ r = []
+ while b:
+ b, c = ancestors[b]
+ r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
+ if r:
+ ui.write('Ancestors: %s\n' % (','.join(r)))
+
+ ui.write('Log:\n')
+ ui.write('%s\n\n' % cs.comment)
+ ui.write('Members: \n')
+ for f in cs.entries:
+ fn = f.file
+ if fn.startswith(opts["prefix"]):
+ fn = fn[len(opts["prefix"]):]
+ ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
+ '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
+ ui.write('\n')
+
+ # have we seen the start tag?
+ if revisions and off:
+ if revisions[0] == str(cs.id) or \
+ revisions[0] in cs.tags:
+ off = False
+
+ # see if we reached the end tag
+ if len(revisions)>1 and not off:
+ if revisions[1] == str(cs.id) or \
+ revisions[1] in cs.tags:
+ break
diff --git a/sys/src/cmd/hg/hgext/convert/darcs.py b/sys/src/cmd/hg/hgext/convert/darcs.py
new file mode 100644
index 000000000..fd51f38bd
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/darcs.py
@@ -0,0 +1,135 @@
+# darcs.py - darcs support for the convert extension
+#
+# Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from common import NoRepo, checktool, commandline, commit, converter_source
+from mercurial.i18n import _
+from mercurial import util
+import os, shutil, tempfile
+
+# The naming drift of ElementTree is fun!
+
+try: from xml.etree.cElementTree import ElementTree
+except ImportError:
+ try: from xml.etree.ElementTree import ElementTree
+ except ImportError:
+ try: from elementtree.cElementTree import ElementTree
+ except ImportError:
+ try: from elementtree.ElementTree import ElementTree
+ except ImportError: ElementTree = None
+
+
+class darcs_source(converter_source, commandline):
+ def __init__(self, ui, path, rev=None):
+ converter_source.__init__(self, ui, path, rev=rev)
+ commandline.__init__(self, ui, 'darcs')
+
+ # check for _darcs, ElementTree, _darcs/inventories so that we can
+ # easily skip test-convert-darcs if ElementTree is not around
+ if not os.path.exists(os.path.join(path, '_darcs', 'inventories')):
+ raise NoRepo("%s does not look like a darcs repo" % path)
+
+ if not os.path.exists(os.path.join(path, '_darcs')):
+ raise NoRepo("%s does not look like a darcs repo" % path)
+
+ checktool('darcs')
+ version = self.run0('--version').splitlines()[0].strip()
+ if version < '2.1':
+ raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
+ version)
+
+ if ElementTree is None:
+ raise util.Abort(_("Python ElementTree module is not available"))
+
+ self.path = os.path.realpath(path)
+
+ self.lastrev = None
+ self.changes = {}
+ self.parents = {}
+ self.tags = {}
+
+ def before(self):
+ self.tmppath = tempfile.mkdtemp(
+ prefix='convert-' + os.path.basename(self.path) + '-')
+ output, status = self.run('init', repodir=self.tmppath)
+ self.checkexit(status)
+
+ tree = self.xml('changes', xml_output=True, summary=True,
+ repodir=self.path)
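+ # Each <patch> element resembles (illustrative):
+ # <patch author='joe@example.com' hash='20080407...-abc...'
+ #        local_date='Mon Apr 7 12:00:00 EST 2008'>
+ #   <name>TAG 1.0</name><comment>...</comment>
+ # </patch>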
+ tagname = None
+ child = None
+ for elt in tree.findall('patch'):
+ node = elt.get('hash')
+ name = elt.findtext('name', '')
+ if name.startswith('TAG '):
+ tagname = name[4:].strip()
+ elif tagname is not None:
+ self.tags[tagname] = node
+ tagname = None
+ self.changes[node] = elt
+ self.parents[child] = [node]
+ child = node
+ self.parents[child] = []
+
+ def after(self):
+ self.ui.debug(_('cleaning up %s\n') % self.tmppath)
+ shutil.rmtree(self.tmppath, ignore_errors=True)
+
+ def xml(self, cmd, **kwargs):
+ etree = ElementTree()
+ fp = self._run(cmd, **kwargs)
+ etree.parse(fp)
+ self.checkexit(fp.close())
+ return etree.getroot()
+
+ def getheads(self):
+ return self.parents[None]
+
+ def getcommit(self, rev):
+ elt = self.changes[rev]
+ date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
+ desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
+ return commit(author=elt.get('author'), date=util.datestr(date),
+ desc=desc.strip(), parents=self.parents[rev])
+
+ def pull(self, rev):
+ output, status = self.run('pull', self.path, all=True,
+ match='hash %s' % rev,
+ no_test=True, no_posthook=True,
+ external_merge='/bin/false',
+ repodir=self.tmppath)
+ if status:
+ if output.find('We have conflicts in') == -1:
+ self.checkexit(status, output)
+ output, status = self.run('revert', all=True, repodir=self.tmppath)
+ self.checkexit(status, output)
+
+ def getchanges(self, rev):
+ self.pull(rev)
+ copies = {}
+ changes = []
+ for elt in self.changes[rev].find('summary').getchildren():
+ if elt.tag in ('add_directory', 'remove_directory'):
+ continue
+ if elt.tag == 'move':
+ changes.append((elt.get('from'), rev))
+ copies[elt.get('from')] = elt.get('to')
+ else:
+ changes.append((elt.text.strip(), rev))
+ self.lastrev = rev
+ return sorted(changes), copies
+
+ def getfile(self, name, rev):
+ if rev != self.lastrev:
+ raise util.Abort(_('internal calling inconsistency'))
+ return open(os.path.join(self.tmppath, name), 'rb').read()
+
+ def getmode(self, name, rev):
+ mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
+ return (mode & 0111) and 'x' or ''
+
+ def gettags(self):
+ return self.tags
diff --git a/sys/src/cmd/hg/hgext/convert/filemap.py b/sys/src/cmd/hg/hgext/convert/filemap.py
new file mode 100644
index 000000000..3c8307ae8
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/filemap.py
@@ -0,0 +1,359 @@
+# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import shlex
+from mercurial.i18n import _
+from mercurial import util
+from common import SKIPREV, converter_source
+
+def rpairs(name):
+ yield '.', name
+ e = len(name)
+ while e != -1:
+ yield name[:e], name[e+1:]
+ e = name.rfind('/', 0, e)
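+# e.g. list(rpairs('a/b/c')) ==
+# [('.', 'a/b/c'), ('a/b/c', ''), ('a/b', 'c'), ('a', 'b/c')]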
+
+class filemapper(object):
+ '''Map and filter filenames when importing.
+ A name can be mapped to itself, a new name, or None (omit from new
+ repository).'''
+
+ def __init__(self, ui, path=None):
+ self.ui = ui
+ self.include = {}
+ self.exclude = {}
+ self.rename = {}
+ if path:
+ if self.parse(path):
+ raise util.Abort(_('errors in filemap'))
+
+ def parse(self, path):
+ errs = 0
+ def check(name, mapping, listname):
+ if name in mapping:
+ self.ui.warn(_('%s:%d: %r already in %s list\n') %
+ (lex.infile, lex.lineno, name, listname))
+ return 1
+ return 0
+ lex = shlex.shlex(open(path), path, True)
+ lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
+ cmd = lex.get_token()
+ while cmd:
+ if cmd == 'include':
+ name = lex.get_token()
+ errs += check(name, self.exclude, 'exclude')
+ self.include[name] = name
+ elif cmd == 'exclude':
+ name = lex.get_token()
+ errs += check(name, self.include, 'include')
+ errs += check(name, self.rename, 'rename')
+ self.exclude[name] = name
+ elif cmd == 'rename':
+ src = lex.get_token()
+ dest = lex.get_token()
+ errs += check(src, self.exclude, 'exclude')
+ self.rename[src] = dest
+ elif cmd == 'source':
+ errs += self.parse(lex.get_token())
+ else:
+ self.ui.warn(_('%s:%d: unknown directive %r\n') %
+ (lex.infile, lex.lineno, cmd))
+ errs += 1
+ cmd = lex.get_token()
+ return errs
+
+ def lookup(self, name, mapping):
+ for pre, suf in rpairs(name):
+ try:
+ return mapping[pre], pre, suf
+ except KeyError:
+ pass
+ return '', name, ''
+
+ def __call__(self, name):
+ if self.include:
+ inc = self.lookup(name, self.include)[0]
+ else:
+ inc = name
+ if self.exclude:
+ exc = self.lookup(name, self.exclude)[0]
+ else:
+ exc = ''
+ if not inc or exc:
+ return None
+ newpre, pre, suf = self.lookup(name, self.rename)
+ if newpre:
+ if newpre == '.':
+ return suf
+ if suf:
+ return newpre + '/' + suf
+ return newpre
+ return name
+
+ def active(self):
+ return bool(self.include or self.exclude or self.rename)
+
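+# A filemap, as parsed above, is made of directives like (illustrative):
+#
+#   include subdir
+#   exclude subdir/private
+#   rename subdir/src newdir
+#   source other-filemap
+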
+# This class does two additional things compared to a regular source:
+#
+# - Filter and rename files. This is mostly wrapped by the filemapper
+# class above. We hide the original filename in the revision that is
+# returned by getchanges to be able to find things later in getfile
+# and getmode.
+#
+# - Return only revisions that matter for the files we're interested in.
+# This involves rewriting the parents of the original revision to
+# create a graph that is restricted to those revisions.
+#
+# This set of revisions includes not only revisions that directly
+# touch files we're interested in, but also merges that merge two
+# or more interesting revisions.
+
+class filemap_source(converter_source):
+ def __init__(self, ui, baseconverter, filemap):
+ super(filemap_source, self).__init__(ui)
+ self.base = baseconverter
+ self.filemapper = filemapper(ui, filemap)
+ self.commits = {}
+ # if a revision rev has parent p in the original revision graph, then
+ # rev will have parent self.parentmap[p] in the restricted graph.
+ self.parentmap = {}
+ # self.wantedancestors[rev] is the set of all ancestors of rev that
+ # are in the restricted graph.
+ self.wantedancestors = {}
+ self.convertedorder = None
+ self._rebuilt = False
+ self.origparents = {}
+ self.children = {}
+ self.seenchildren = {}
+
+ def before(self):
+ self.base.before()
+
+ def after(self):
+ self.base.after()
+
+ def setrevmap(self, revmap):
+ # rebuild our state to make things restartable
+ #
+ # To avoid calling getcommit for every revision that has already
+ # been converted, we rebuild only the parentmap, delaying the
+ # rebuild of wantedancestors until we need it (i.e. until a
+ # merge).
+ #
+ # We assume the order argument lists the revisions in
+ # topological order, so that we can infer which revisions were
+ # wanted by previous runs.
+ self._rebuilt = not revmap
+ seen = {SKIPREV: SKIPREV}
+ dummyset = set()
+ converted = []
+ for rev in revmap.order:
+ mapped = revmap[rev]
+ wanted = mapped not in seen
+ if wanted:
+ seen[mapped] = rev
+ self.parentmap[rev] = rev
+ else:
+ self.parentmap[rev] = seen[mapped]
+ self.wantedancestors[rev] = dummyset
+ arg = seen[mapped]
+ if arg == SKIPREV:
+ arg = None
+ converted.append((rev, wanted, arg))
+ self.convertedorder = converted
+ return self.base.setrevmap(revmap)
+
+ def rebuild(self):
+ if self._rebuilt:
+ return True
+ self._rebuilt = True
+ self.parentmap.clear()
+ self.wantedancestors.clear()
+ self.seenchildren.clear()
+ for rev, wanted, arg in self.convertedorder:
+ if rev not in self.origparents:
+ self.origparents[rev] = self.getcommit(rev).parents
+ if arg is not None:
+ self.children[arg] = self.children.get(arg, 0) + 1
+
+ for rev, wanted, arg in self.convertedorder:
+ parents = self.origparents[rev]
+ if wanted:
+ self.mark_wanted(rev, parents)
+ else:
+ self.mark_not_wanted(rev, arg)
+ self._discard(arg, *parents)
+
+ return True
+
+ def getheads(self):
+ return self.base.getheads()
+
+ def getcommit(self, rev):
+ # We want to save a reference to the commit objects to be able
+ # to rewrite their parents later on.
+ c = self.commits[rev] = self.base.getcommit(rev)
+ for p in c.parents:
+ self.children[p] = self.children.get(p, 0) + 1
+ return c
+
+ def _discard(self, *revs):
+ for r in revs:
+ if r is None:
+ continue
+ self.seenchildren[r] = self.seenchildren.get(r, 0) + 1
+ if self.seenchildren[r] == self.children[r]:
+ del self.wantedancestors[r]
+ del self.parentmap[r]
+ del self.seenchildren[r]
+ if self._rebuilt:
+ del self.children[r]
+
+ def wanted(self, rev, i):
+ # Return True if we're directly interested in rev.
+ #
+ # i is an index selecting one of the parents of rev (if rev
+ # has no parents, i is None). getchangedfiles will give us
+ # the list of files that are different in rev and in the parent
+ # indicated by i. If we're interested in any of these files,
+ # we're interested in rev.
+ try:
+ files = self.base.getchangedfiles(rev, i)
+ except NotImplementedError:
+ raise util.Abort(_("source repository doesn't support --filemap"))
+ for f in files:
+ if self.filemapper(f):
+ return True
+ return False
+
+ def mark_not_wanted(self, rev, p):
+ # Mark rev as not interesting and update data structures.
+
+ if p is None:
+ # A root revision. Use SKIPREV to indicate that it doesn't
+ # map to any revision in the restricted graph. Put SKIPREV
+ # in the set of wanted ancestors to simplify code elsewhere.
+ self.parentmap[rev] = SKIPREV
+ self.wantedancestors[rev] = set((SKIPREV,))
+ return
+
+ # Reuse the data from our parent.
+ self.parentmap[rev] = self.parentmap[p]
+ self.wantedancestors[rev] = self.wantedancestors[p]
+
+ def mark_wanted(self, rev, parents):
+ # Mark rev as wanted and update data structures.
+
+ # rev will be in the restricted graph, so children of rev in
+ # the original graph should still have rev as a parent in the
+ # restricted graph.
+ self.parentmap[rev] = rev
+
+ # The set of wanted ancestors of rev is the union of the sets
+ # of wanted ancestors of its parents. Plus rev itself.
+ wrev = set()
+ for p in parents:
+ wrev.update(self.wantedancestors[p])
+ wrev.add(rev)
+ self.wantedancestors[rev] = wrev
+
+ def getchanges(self, rev):
+ parents = self.commits[rev].parents
+ if len(parents) > 1:
+ self.rebuild()
+
+ # To decide whether we're interested in rev we:
+ #
+ # - calculate what parents rev will have if it turns out we're
+ # interested in it. If it's going to have more than 1 parent,
+ # we're interested in it.
+ #
+ # - otherwise, we'll compare it with the single parent we found.
+ # If any of the files we're interested in is different in the
+ # two revisions, we're interested in rev.
+
+ # A parent p is interesting if its mapped version (self.parentmap[p]):
+ # - is not SKIPREV
+ # - is still not in the list of parents (we don't want duplicates)
+ # - is not an ancestor of the mapped versions of the other parents
+ mparents = []
+ wp = None
+ for i, p1 in enumerate(parents):
+ mp1 = self.parentmap[p1]
+ if mp1 == SKIPREV or mp1 in mparents:
+ continue
+ for p2 in parents:
+ if p1 == p2 or mp1 == self.parentmap[p2]:
+ continue
+ if mp1 in self.wantedancestors[p2]:
+ break
+ else:
+ mparents.append(mp1)
+ wp = i
+
+ if wp is None and parents:
+ wp = 0
+
+ self.origparents[rev] = parents
+
+ if len(mparents) < 2 and not self.wanted(rev, wp):
+ # We don't want this revision.
+ # Update our state and tell the convert process to map this
+ # revision to the same revision its parent was mapped to.
+ p = None
+ if parents:
+ p = parents[wp]
+ self.mark_not_wanted(rev, p)
+ self.convertedorder.append((rev, False, p))
+ self._discard(*parents)
+ return self.parentmap[rev]
+
+ # We want this revision.
+ # Rewrite the parents of the commit object
+ self.commits[rev].parents = mparents
+ self.mark_wanted(rev, parents)
+ self.convertedorder.append((rev, True, None))
+ self._discard(*parents)
+
+ # Get the real changes and do the filtering/mapping.
+ # To be able to get the files later on in getfile and getmode,
+ # we hide the original filename in the rev part of the return
+ # value.
+ changes, copies = self.base.getchanges(rev)
+ newnames = {}
+ files = []
+ for f, r in changes:
+ newf = self.filemapper(f)
+ if newf:
+ files.append((newf, (f, r)))
+ newnames[f] = newf
+
+ ncopies = {}
+ for c in copies:
+ newc = self.filemapper(c)
+ if newc:
+ newsource = self.filemapper(copies[c])
+ if newsource:
+ ncopies[newc] = newsource
+
+ return files, ncopies
+
+ def getfile(self, name, rev):
+ realname, realrev = rev
+ return self.base.getfile(realname, realrev)
+
+ def getmode(self, name, rev):
+ realname, realrev = rev
+ return self.base.getmode(realname, realrev)
+
+ def gettags(self):
+ return self.base.gettags()
+
+ def hasnativeorder(self):
+ return self.base.hasnativeorder()
+
+ def lookuprev(self, rev):
+ return self.base.lookuprev(rev)
diff --git a/sys/src/cmd/hg/hgext/convert/git.py b/sys/src/cmd/hg/hgext/convert/git.py
new file mode 100644
index 000000000..d529744ac
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/git.py
@@ -0,0 +1,152 @@
+# git.py - git support for the convert extension
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os
+from mercurial import util
+
+from common import NoRepo, commit, converter_source, checktool
+
+class convert_git(converter_source):
+ # Windows does not support the GIT_DIR= construct, while some
+ # other systems cannot remove an environment variable. Just
+ # assume no system has both issues.
+ if hasattr(os, 'unsetenv'):
+ def gitcmd(self, s):
+ prevgitdir = os.environ.get('GIT_DIR')
+ os.environ['GIT_DIR'] = self.path
+ try:
+ return util.popen(s, 'rb')
+ finally:
+ if prevgitdir is None:
+ del os.environ['GIT_DIR']
+ else:
+ os.environ['GIT_DIR'] = prevgitdir
+ else:
+ def gitcmd(self, s):
+ return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')
+
+ def __init__(self, ui, path, rev=None):
+ super(convert_git, self).__init__(ui, path, rev=rev)
+
+ if os.path.isdir(path + "/.git"):
+ path += "/.git"
+ if not os.path.exists(path + "/objects"):
+ raise NoRepo("%s does not look like a Git repo" % path)
+
+ checktool('git', 'git')
+
+ self.path = path
+
+ def getheads(self):
+ if not self.rev:
+ return self.gitcmd('git rev-parse --branches --remotes').read().splitlines()
+ else:
+ fh = self.gitcmd("git rev-parse --verify %s" % self.rev)
+ return [fh.read()[:-1]]
+
+ def catfile(self, rev, type):
+ if rev == "0" * 40: raise IOError()
+ fh = self.gitcmd("git cat-file %s %s" % (type, rev))
+ return fh.read()
+
+ def getfile(self, name, rev):
+ return self.catfile(rev, "blob")
+
+ def getmode(self, name, rev):
+ return self.modecache[(name, rev)]
+
+ def getchanges(self, version):
+ self.modecache = {}
+ fh = self.gitcmd("git diff-tree -z --root -m -r %s" % version)
+ changes = []
+ seen = set()
+ entry = None
+ for l in fh.read().split('\x00'):
+ if not entry:
+ if not l.startswith(':'):
+ continue
+ entry = l
+ continue
+ f = l
+ if f not in seen:
+ seen.add(f)
+ entry = entry.split()
+ h = entry[3]
+ p = (entry[1] == "100755")
+ s = (entry[1] == "120000")
+ self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
+ changes.append((f, h))
+ entry = None
+ return (changes, {})
+
+ def getcommit(self, version):
+ c = self.catfile(version, "commit") # read the commit hash
+ end = c.find("\n\n")
+ message = c[end+2:]
+ message = self.recode(message)
+ l = c[:end].splitlines()
+ parents = []
+ author = committer = None
+ for e in l[1:]:
+ n, v = e.split(" ", 1)
+ if n == "author":
+ p = v.split()
+ tm, tz = p[-2:]
+ author = " ".join(p[:-2])
+ if author[0] == "<": author = author[1:-1]
+ author = self.recode(author)
+ if n == "committer":
+ p = v.split()
+ tm, tz = p[-2:]
+ committer = " ".join(p[:-2])
+ if committer[0] == "<": committer = committer[1:-1]
+ committer = self.recode(committer)
+ if n == "parent": parents.append(v)
+
+ if committer and committer != author:
+ message += "\ncommitter: %s\n" % committer
+ tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
+ tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
+ date = tm + " " + str(tz)
+
+ c = commit(parents=parents, date=date, author=author, desc=message,
+ rev=version)
+ return c
+
+ def gettags(self):
+ tags = {}
+ fh = self.gitcmd('git ls-remote --tags "%s"' % self.path)
+ prefix = 'refs/tags/'
+ for line in fh:
+ line = line.strip()
+ if not line.endswith("^{}"):
+ continue
+ node, tag = line.split(None, 1)
+ if not tag.startswith(prefix):
+ continue
+ tag = tag[len(prefix):-3]
+ tags[tag] = node
+
+ return tags
+
+ def getchangedfiles(self, version, i):
+ changes = []
+ if i is None:
+ fh = self.gitcmd("git diff-tree --root -m -r %s" % version)
+ for l in fh:
+ if "\t" not in l:
+ continue
+ m, f = l[:-1].split("\t")
+ changes.append(f)
+ fh.close()
+ else:
+ fh = self.gitcmd('git diff-tree --name-only --root -r %s "%s^%s" --'
+ % (version, version, i+1))
+ changes = [f.rstrip('\n') for f in fh]
+ fh.close()
+
+ return changes
diff --git a/sys/src/cmd/hg/hgext/convert/gnuarch.py b/sys/src/cmd/hg/hgext/convert/gnuarch.py
new file mode 100644
index 000000000..8d2475e18
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/gnuarch.py
@@ -0,0 +1,342 @@
+# gnuarch.py - GNU Arch support for the convert extension
+#
+# Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
+# and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from common import NoRepo, commandline, commit, converter_source
+from mercurial.i18n import _
+from mercurial import util
+import os, shutil, tempfile, stat, locale
+from email.Parser import Parser
+
+class gnuarch_source(converter_source, commandline):
+
+ class gnuarch_rev(object):
+ def __init__(self, rev):
+ self.rev = rev
+ self.summary = ''
+ self.date = None
+ self.author = ''
+ self.continuationof = None
+ self.add_files = []
+ self.mod_files = []
+ self.del_files = []
+ self.ren_files = {}
+ self.ren_dirs = {}
+
+ def __init__(self, ui, path, rev=None):
+ super(gnuarch_source, self).__init__(ui, path, rev=rev)
+
+ if not os.path.exists(os.path.join(path, '{arch}')):
+ raise NoRepo(_("%s does not look like a GNU Arch repo") % path)
+
+ # Could use checktool, but we want to check for baz or tla.
+ self.execmd = None
+ if util.find_exe('baz'):
+ self.execmd = 'baz'
+ else:
+ if util.find_exe('tla'):
+ self.execmd = 'tla'
+ else:
+ raise util.Abort(_('cannot find a GNU Arch tool'))
+
+ commandline.__init__(self, ui, self.execmd)
+
+ self.path = os.path.realpath(path)
+ self.tmppath = None
+
+ self.treeversion = None
+ self.lastrev = None
+ self.changes = {}
+ self.parents = {}
+ self.tags = {}
+ self.modecache = {}
+ self.catlogparser = Parser()
+ self.locale = locale.getpreferredencoding()
+ self.archives = []
+
+ def before(self):
+ # Get registered archives
+ self.archives = [i.rstrip('\n')
+ for i in self.runlines0('archives', '-n')]
+
+ if self.execmd == 'tla':
+ output = self.run0('tree-version', self.path)
+ else:
+ output = self.run0('tree-version', '-d', self.path)
+ self.treeversion = output.strip()
+
+ # Get name of temporary directory
+ version = self.treeversion.split('/')
+ self.tmppath = os.path.join(tempfile.gettempdir(),
+ 'hg-%s' % version[1])
+
+ # Generate parents dictionary
+ self.parents[None] = []
+ treeversion = self.treeversion
+ child = None
+ while treeversion:
+ self.ui.status(_('analyzing tree version %s...\n') % treeversion)
+
+ archive = treeversion.split('/')[0]
+ if archive not in self.archives:
+ self.ui.status(_('tree analysis stopped because it points to '
+ 'an unregistered archive %s...\n') % archive)
+ break
+
+ # Get the complete list of revisions for that tree version
+ output, status = self.runlines('revisions', '-r', '-f', treeversion)
+ self.checkexit(status, 'failed retrieving revisions for %s' % treeversion)
+
+ # No new iteration unless a revision has a continuation-of header
+ treeversion = None
+
+ for l in output:
+ rev = l.strip()
+ self.changes[rev] = self.gnuarch_rev(rev)
+ self.parents[rev] = []
+
+ # Read author, date and summary
+ catlog, status = self.run('cat-log', '-d', self.path, rev)
+ if status:
+ catlog = self.run0('cat-archive-log', rev)
+ self._parsecatlog(catlog, rev)
+
+ # Populate the parents map
+ self.parents[child].append(rev)
+
+ # Keep track of the current revision as the child of the next
+ # revision scanned
+ child = rev
+
+ # Check if we have to follow the usual incremental history
+ # or if we have to 'jump' to a different treeversion given
+ # by the continuation-of header.
+ if self.changes[rev].continuationof:
+ treeversion = '--'.join(self.changes[rev].continuationof.split('--')[:-1])
+ break
+
+ # If we reached a base-0 revision w/o any continuation-of
+ # header, it means the tree history ends here.
+ if rev[-6:] == 'base-0':
+ break
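+ # Arch names look like (illustrative):
+ # jane@example.net--2008/hello--main--1.0--patch-3
+ # so dropping the last '--' component of a continuation-of
+ # revision gives the tree version to follow next.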
+
+ def after(self):
+ self.ui.debug(_('cleaning up %s\n') % self.tmppath)
+ shutil.rmtree(self.tmppath, ignore_errors=True)
+
+ def getheads(self):
+ return self.parents[None]
+
+ def getfile(self, name, rev):
+ if rev != self.lastrev:
+ raise util.Abort(_('internal calling inconsistency'))
+
+ # Raise IOError if necessary (i.e. deleted files).
+ if not os.path.exists(os.path.join(self.tmppath, name)):
+ raise IOError
+
+ data, mode = self._getfile(name, rev)
+ self.modecache[(name, rev)] = mode
+
+ return data
+
+ def getmode(self, name, rev):
+ return self.modecache[(name, rev)]
+
+ def getchanges(self, rev):
+ self.modecache = {}
+ self._update(rev)
+ changes = []
+ copies = {}
+
+ for f in self.changes[rev].add_files:
+ changes.append((f, rev))
+
+ for f in self.changes[rev].mod_files:
+ changes.append((f, rev))
+
+ for f in self.changes[rev].del_files:
+ changes.append((f, rev))
+
+ for src in self.changes[rev].ren_files:
+ to = self.changes[rev].ren_files[src]
+ changes.append((src, rev))
+ changes.append((to, rev))
+ copies[to] = src
+
+ for src in self.changes[rev].ren_dirs:
+ to = self.changes[rev].ren_dirs[src]
+ chgs, cps = self._rendirchanges(src, to)
+ changes += [(f, rev) for f in chgs]
+ copies.update(cps)
+
+ self.lastrev = rev
+ return sorted(set(changes)), copies
+
+ def getcommit(self, rev):
+ changes = self.changes[rev]
+ return commit(author=changes.author, date=changes.date,
+ desc=changes.summary, parents=self.parents[rev], rev=rev)
+
+ def gettags(self):
+ return self.tags
+
+ def _execute(self, cmd, *args, **kwargs):
+ cmdline = [self.execmd, cmd]
+ cmdline += args
+ cmdline = [util.shellquote(arg) for arg in cmdline]
+ cmdline += ['>', util.nulldev, '2>', util.nulldev]
+ cmdline = util.quotecommand(' '.join(cmdline))
+ self.ui.debug(cmdline, '\n')
+ return os.system(cmdline)
+
+ def _update(self, rev):
+ self.ui.debug(_('applying revision %s...\n') % rev)
+ changeset, status = self.runlines('replay', '-d', self.tmppath,
+ rev)
+ if status:
+ # Something went wrong while merging (baz or tla
+ # issue?), get latest revision and try from there
+ shutil.rmtree(self.tmppath, ignore_errors=True)
+ self._obtainrevision(rev)
+ else:
+ old_rev = self.parents[rev][0]
+ self.ui.debug(_('computing changeset between %s and %s...\n')
+ % (old_rev, rev))
+ self._parsechangeset(changeset, rev)
+
+ def _getfile(self, name, rev):
+ mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
+ if stat.S_ISLNK(mode):
+ data = os.readlink(os.path.join(self.tmppath, name))
+ mode = mode and 'l' or ''
+ else:
+ data = open(os.path.join(self.tmppath, name), 'rb').read()
+ mode = (mode & 0111) and 'x' or ''
+ return data, mode
+
+ def _exclude(self, name):
+ exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
+ for exc in exclude:
+ if name.find(exc) != -1:
+ return True
+ return False
+
+ def _readcontents(self, path):
+ files = []
+ contents = os.listdir(path)
+ while len(contents) > 0:
+ c = contents.pop()
+ p = os.path.join(path, c)
+ # os.walk could be used, but here we avoid internal GNU
+ # Arch files and directories, thus saving a lot of time.
+ if not self._exclude(p):
+ if os.path.isdir(p):
+ contents += [os.path.join(c, f) for f in os.listdir(p)]
+ else:
+ files.append(c)
+ return files
+
+ def _rendirchanges(self, src, dest):
+ changes = []
+ copies = {}
+ files = self._readcontents(os.path.join(self.tmppath, dest))
+ for f in files:
+ s = os.path.join(src, f)
+ d = os.path.join(dest, f)
+ changes.append(s)
+ changes.append(d)
+ copies[d] = s
+ return changes, copies
+
+ def _obtainrevision(self, rev):
+ self.ui.debug(_('obtaining revision %s...\n') % rev)
+ output = self._execute('get', rev, self.tmppath)
+ self.checkexit(output)
+ self.ui.debug(_('analyzing revision %s...\n') % rev)
+ files = self._readcontents(self.tmppath)
+ self.changes[rev].add_files += files
+
+ def _stripbasepath(self, path):
+ if path.startswith('./'):
+ return path[2:]
+ return path
+
+ def _parsecatlog(self, data, rev):
+ try:
+ catlog = self.catlogparser.parsestr(data)
+
+ # Commit date
+ self.changes[rev].date = util.datestr(
+ util.strdate(catlog['Standard-date'],
+ '%Y-%m-%d %H:%M:%S'))
+
+ # Commit author
+ self.changes[rev].author = self.recode(catlog['Creator'])
+
+ # Commit description
+ self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
+ catlog.get_payload()))
+ self.changes[rev].summary = self.recode(self.changes[rev].summary)
+
+ # Commit revision origin when dealing with a branch or tag
+ if catlog.has_key('Continuation-of'):
+ self.changes[rev].continuationof = self.recode(catlog['Continuation-of'])
+ except Exception:
+ raise util.Abort(_('could not parse cat-log of %s') % rev)
+
+ def _parsechangeset(self, data, rev):
+ for l in data:
+ l = l.strip()
+ # Added file (ignore added directory)
+ if l.startswith('A') and not l.startswith('A/'):
+ file = self._stripbasepath(l[1:].strip())
+ if not self._exclude(file):
+ self.changes[rev].add_files.append(file)
+ # Deleted file (ignore deleted directory)
+ elif l.startswith('D') and not l.startswith('D/'):
+ file = self._stripbasepath(l[1:].strip())
+ if not self._exclude(file):
+ self.changes[rev].del_files.append(file)
+ # Modified binary file
+ elif l.startswith('Mb'):
+ file = self._stripbasepath(l[2:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Modified link
+ elif l.startswith('M->'):
+ file = self._stripbasepath(l[3:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Modified file
+ elif l.startswith('M'):
+ file = self._stripbasepath(l[1:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Renamed file (or link)
+ elif l.startswith('=>'):
+ files = l[2:].strip().split(' ')
+ if len(files) == 1:
+ files = l[2:].strip().split('\t')
+ src = self._stripbasepath(files[0])
+ dst = self._stripbasepath(files[1])
+ if not self._exclude(src) and not self._exclude(dst):
+ self.changes[rev].ren_files[src] = dst
+ # Conversion from file to link or from link to file (modified)
+ elif l.startswith('ch'):
+ file = self._stripbasepath(l[2:].strip())
+ if not self._exclude(file):
+ self.changes[rev].mod_files.append(file)
+ # Renamed directory
+ elif l.startswith('/>'):
+ dirs = l[2:].strip().split(' ')
+ if len(dirs) == 1:
+ dirs = l[2:].strip().split('\t')
+ src = self._stripbasepath(dirs[0])
+ dst = self._stripbasepath(dirs[1])
+ if not self._exclude(src) and not self._exclude(dst):
+ self.changes[rev].ren_dirs[src] = dst
diff --git a/sys/src/cmd/hg/hgext/convert/hg.py b/sys/src/cmd/hg/hgext/convert/hg.py
new file mode 100644
index 000000000..060c1430a
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/hg.py
@@ -0,0 +1,363 @@
+# hg.py - hg backend for convert extension
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+# Notes for hg->hg conversion:
+#
+# * Old versions of Mercurial didn't trim the whitespace from the ends
+# of commit messages, but new versions do. Changesets created by
+# those older versions, then converted, may thus have different
+# hashes for changesets that are otherwise identical.
+#
+# * Using "--config convert.hg.saverev=true" will make the source
+# identifier to be stored in the converted revision. This will cause
+# the converted revision to have a different identity than the
+# source.
+
+
+import os, time, cStringIO
+from mercurial.i18n import _
+from mercurial.node import bin, hex, nullid
+from mercurial import hg, util, context, error
+
+from common import NoRepo, commit, converter_source, converter_sink
+
+class mercurial_sink(converter_sink):
+ def __init__(self, ui, path):
+ converter_sink.__init__(self, ui, path)
+ self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
+ self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
+ self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
+ self.lastbranch = None
+ if os.path.isdir(path) and len(os.listdir(path)) > 0:
+ try:
+ self.repo = hg.repository(self.ui, path)
+ if not self.repo.local():
+ raise NoRepo(_('%s is not a local Mercurial repo') % path)
+ except error.RepoError, err:
+ ui.traceback()
+ raise NoRepo(err.args[0])
+ else:
+ try:
+ ui.status(_('initializing destination %s repository\n') % path)
+ self.repo = hg.repository(self.ui, path, create=True)
+ if not self.repo.local():
+ raise NoRepo(_('%s is not a local Mercurial repo') % path)
+ self.created.append(path)
+ except error.RepoError:
+ ui.traceback()
+ raise NoRepo("could not create hg repo %s as sink" % path)
+ self.lock = None
+ self.wlock = None
+ self.filemapmode = False
+
+ def before(self):
+ self.ui.debug(_('run hg sink pre-conversion action\n'))
+ self.wlock = self.repo.wlock()
+ self.lock = self.repo.lock()
+
+ def after(self):
+ self.ui.debug(_('run hg sink post-conversion action\n'))
+ self.lock.release()
+ self.wlock.release()
+
+ def revmapfile(self):
+ return os.path.join(self.path, ".hg", "shamap")
+
+ def authorfile(self):
+ return os.path.join(self.path, ".hg", "authormap")
+
+ def getheads(self):
+ h = self.repo.changelog.heads()
+ return [ hex(x) for x in h ]
+
+ def setbranch(self, branch, pbranches):
+ if not self.clonebranches:
+ return
+
+ setbranch = (branch != self.lastbranch)
+ self.lastbranch = branch
+ if not branch:
+ branch = 'default'
+ pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
+ pbranch = pbranches and pbranches[0][1] or 'default'
+
+ branchpath = os.path.join(self.path, branch)
+ if setbranch:
+ self.after()
+ try:
+ self.repo = hg.repository(self.ui, branchpath)
+ except:
+ self.repo = hg.repository(self.ui, branchpath, create=True)
+ self.before()
+
+ # pbranches may bring revisions from other branches (merge parents)
+ # Make sure we have them, or pull them.
+ missings = {}
+ for b in pbranches:
+ try:
+ self.repo.lookup(b[0])
+ except:
+ missings.setdefault(b[1], []).append(b[0])
+
+ if missings:
+ self.after()
+ for pbranch, heads in missings.iteritems():
+ pbranchpath = os.path.join(self.path, pbranch)
+ prepo = hg.repository(self.ui, pbranchpath)
+ self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
+ self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
+ self.before()
+
+ def _rewritetags(self, source, revmap, data):
+ fp = cStringIO.StringIO()
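+ # each .hgtags line has the form '<40-hex node> <tag name>';
+ # only the node part is rewritten through revmap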
+ for line in data.splitlines():
+ s = line.split(' ', 1)
+ if len(s) != 2:
+ continue
+ revid = revmap.get(source.lookuprev(s[0]))
+ if not revid:
+ continue
+ fp.write('%s %s\n' % (revid, s[1]))
+ return fp.getvalue()
+
+ def putcommit(self, files, copies, parents, commit, source, revmap):
+
+ files = dict(files)
+ def getfilectx(repo, memctx, f):
+ v = files[f]
+ data = source.getfile(f, v)
+ e = source.getmode(f, v)
+ if f == '.hgtags':
+ data = self._rewritetags(source, revmap, data)
+ return context.memfilectx(f, data, 'l' in e, 'x' in e, copies.get(f))
+
+ pl = []
+ for p in parents:
+ if p not in pl:
+ pl.append(p)
+ parents = pl
+ nparents = len(parents)
+ if self.filemapmode and nparents == 1:
+ m1node = self.repo.changelog.read(bin(parents[0]))[0]
+ parent = parents[0]
+
+ if len(parents) < 2: parents.append(nullid)
+ if len(parents) < 2: parents.append(nullid)
+ p2 = parents.pop(0)
+
+ text = commit.desc
+ extra = commit.extra.copy()
+ if self.branchnames and commit.branch:
+ extra['branch'] = commit.branch
+ if commit.rev:
+ extra['convert_revision'] = commit.rev
+
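+ # A commit with more than two parents is flattened into a chain
+ # of two-parent commits: each extra iteration below commits an
+ # "(octopus merge fixup)" changeset on top of the previous tip.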
+ while parents:
+ p1 = p2
+ p2 = parents.pop(0)
+ ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), getfilectx,
+ commit.author, commit.date, extra)
+ self.repo.commitctx(ctx)
+ text = "(octopus merge fixup)\n"
+ p2 = hex(self.repo.changelog.tip())
+
+ if self.filemapmode and nparents == 1:
+ man = self.repo.manifest
+ mnode = self.repo.changelog.read(bin(p2))[0]
+ if not man.cmp(m1node, man.revision(mnode)):
+ self.ui.status(_("filtering out empty revision\n"))
+ self.repo.rollback()
+ return parent
+ return p2
+
+ def puttags(self, tags):
+ try:
+ parentctx = self.repo[self.tagsbranch]
+ tagparent = parentctx.node()
+ except error.RepoError:
+ parentctx = None
+ tagparent = nullid
+
+ try:
+ oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
+ except:
+ oldlines = []
+
+ newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
+ if newlines == oldlines:
+ return None
+ data = "".join(newlines)
+ def getfilectx(repo, memctx, f):
+ return context.memfilectx(f, data, False, False, None)
+
+ self.ui.status(_("updating tags\n"))
+ date = "%s 0" % int(time.mktime(time.gmtime()))
+ extra = {'branch': self.tagsbranch}
+ ctx = context.memctx(self.repo, (tagparent, None), "update tags",
+ [".hgtags"], getfilectx, "convert-repo", date,
+ extra)
+ self.repo.commitctx(ctx)
+ return hex(self.repo.changelog.tip())
+
+ def setfilemapmode(self, active):
+ self.filemapmode = active
+
+class mercurial_source(converter_source):
+ def __init__(self, ui, path, rev=None):
+ converter_source.__init__(self, ui, path, rev)
+ self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
+ self.ignored = set()
+ self.saverev = ui.configbool('convert', 'hg.saverev', False)
+ try:
+ self.repo = hg.repository(self.ui, path)
+ # try to provoke an exception if this isn't really a hg
+ # repo, but some other bogus compatible-looking url
+ if not self.repo.local():
+ raise error.RepoError()
+ except error.RepoError:
+ ui.traceback()
+ raise NoRepo("%s is not a local Mercurial repo" % path)
+ self.lastrev = None
+ self.lastctx = None
+ self._changescache = None
+ self.convertfp = None
+ # Restrict converted revisions to startrev descendants
+ startnode = ui.config('convert', 'hg.startrev')
+ if startnode is not None:
+ try:
+ startnode = self.repo.lookup(startnode)
+ except error.RepoError:
+ raise util.Abort(_('%s is not a valid start revision')
+ % startnode)
+ startrev = self.repo.changelog.rev(startnode)
+ children = {startnode: 1}
+ for rev in self.repo.changelog.descendants(startrev):
+ children[self.repo.changelog.node(rev)] = 1
+ self.keep = children.__contains__
+ else:
+ self.keep = util.always
+
+ def changectx(self, rev):
+ if self.lastrev != rev:
+ self.lastctx = self.repo[rev]
+ self.lastrev = rev
+ return self.lastctx
+
+ def parents(self, ctx):
+ return [p.node() for p in ctx.parents()
+ if p and self.keep(p.node())]
+
+ def getheads(self):
+ if self.rev:
+ heads = [self.repo[self.rev].node()]
+ else:
+ heads = self.repo.heads()
+ return [hex(h) for h in heads if self.keep(h)]
+
+ def getfile(self, name, rev):
+ try:
+ return self.changectx(rev)[name].data()
+ except error.LookupError, err:
+ raise IOError(err)
+
+ def getmode(self, name, rev):
+ return self.changectx(rev).manifest().flags(name)
+
+ def getchanges(self, rev):
+ ctx = self.changectx(rev)
+ parents = self.parents(ctx)
+ if not parents:
+ files = sorted(ctx.manifest())
+ if self.ignoreerrors:
+ # calling getcopies() is a simple way to detect missing
+ # revlogs and populate self.ignored
+ self.getcopies(ctx, files)
+ return [(f, rev) for f in files if f not in self.ignored], {}
+ if self._changescache and self._changescache[0] == rev:
+ m, a, r = self._changescache[1]
+ else:
+ m, a, r = self.repo.status(parents[0], ctx.node())[:3]
+ # getcopies() detects missing revlogs early, run it before
+ # filtering the changes.
+ copies = self.getcopies(ctx, m + a)
+ changes = [(name, rev) for name in m + a + r
+ if name not in self.ignored]
+ return sorted(changes), copies
+
+ def getcopies(self, ctx, files):
+ copies = {}
+ for name in files:
+ if name in self.ignored:
+ continue
+ try:
+ copysource, copynode = ctx.filectx(name).renamed()
+ if copysource in self.ignored or not self.keep(copynode):
+ continue
+ copies[name] = copysource
+ except TypeError:
+ pass
+ except error.LookupError, e:
+ if not self.ignoreerrors:
+ raise
+ self.ignored.add(name)
+ self.ui.warn(_('ignoring: %s\n') % e)
+ return copies
+
+ def getcommit(self, rev):
+ ctx = self.changectx(rev)
+ parents = [hex(p) for p in self.parents(ctx)]
+ if self.saverev:
+ crev = rev
+ else:
+ crev = None
+ return commit(author=ctx.user(), date=util.datestr(ctx.date()),
+ desc=ctx.description(), rev=crev, parents=parents,
+ branch=ctx.branch(), extra=ctx.extra(),
+ sortkey=ctx.rev())
+
+ def gettags(self):
+ tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
+ return dict([(name, hex(node)) for name, node in tags
+ if self.keep(node)])
+
+ def getchangedfiles(self, rev, i):
+ ctx = self.changectx(rev)
+ parents = self.parents(ctx)
+ if not parents and i is None:
+ i = 0
+ changes = [], ctx.manifest().keys(), []
+ else:
+ i = i or 0
+ changes = self.repo.status(parents[i], ctx.node())[:3]
+ changes = [[f for f in l if f not in self.ignored] for l in changes]
+
+ if i == 0:
+ self._changescache = (rev, changes)
+
+ return changes[0] + changes[1] + changes[2]
+
+ def converted(self, rev, destrev):
+ if self.convertfp is None:
+ self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
+ 'a')
+ self.convertfp.write('%s %s\n' % (destrev, rev))
+ self.convertfp.flush()
+
+ def before(self):
+ self.ui.debug(_('run hg source pre-conversion action\n'))
+
+ def after(self):
+ self.ui.debug(_('run hg source post-conversion action\n'))
+
+ def hasnativeorder(self):
+ return True
+
+ def lookuprev(self, rev):
+ try:
+ return hex(self.repo.lookup(rev))
+ except error.RepoError:
+ return None
diff --git a/sys/src/cmd/hg/hgext/convert/monotone.py b/sys/src/cmd/hg/hgext/convert/monotone.py
new file mode 100644
index 000000000..085510ce9
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/monotone.py
@@ -0,0 +1,217 @@
+# monotone.py - monotone support for the convert extension
+#
+# Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
+# others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os, re
+from mercurial import util
+from common import NoRepo, commit, converter_source, checktool
+from common import commandline
+from mercurial.i18n import _
+
+class monotone_source(converter_source, commandline):
+ def __init__(self, ui, path=None, rev=None):
+ converter_source.__init__(self, ui, path, rev)
+ commandline.__init__(self, ui, 'mtn')
+
+ self.ui = ui
+ self.path = path
+
+ norepo = NoRepo(_("%s does not look like a monotone repo") % path)
+ if not os.path.exists(os.path.join(path, '_MTN')):
+ # Could be a monotone repository (SQLite db file)
+ try:
+ header = file(path, 'rb').read(16)
+ except:
+ header = ''
+ if header != 'SQLite format 3\x00':
+ raise norepo
+
+ # regular expressions for parsing monotone output
+ space = r'\s*'
+ name = r'\s+"((?:\\"|[^"])*)"\s*'
+ value = name
+ revision = r'\s+\[(\w+)\]\s*'
+ lines = r'(?:.|\n)+'
+
+ self.dir_re = re.compile(space + "dir" + name)
+ self.file_re = re.compile(space + "file" + name + "content" + revision)
+ self.add_file_re = re.compile(space + "add_file" + name + "content" + revision)
+ self.patch_re = re.compile(space + "patch" + name + "from" + revision + "to" + revision)
+ self.rename_re = re.compile(space + "rename" + name + "to" + name)
+ self.delete_re = re.compile(space + "delete" + name)
+ self.tag_re = re.compile(space + "tag" + name + "revision" + revision)
+ self.cert_re = re.compile(lines + space + "name" + name + "value" + value)
+
+ attr = space + "file" + lines + space + "attr" + space
+ self.attr_execute_re = re.compile(attr + '"mtn:execute"' + space + '"true"')
+
+ # cached data
+ self.manifest_rev = None
+ self.manifest = None
+ self.files = None
+ self.dirs = None
+
+ checktool('mtn', abort=False)
+
+ # test if there are any revisions
+ self.rev = None
+ try:
+ self.getheads()
+ except:
+ raise norepo
+ self.rev = rev
+
+ def mtnrun(self, *args, **kwargs):
+ kwargs['d'] = self.path
+ return self.run0('automate', *args, **kwargs)
+
+ def mtnloadmanifest(self, rev):
+ if self.manifest_rev == rev:
+ return
+ self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
+ self.manifest_rev = rev
+ self.files = {}
+ self.dirs = {}
+
+ for e in self.manifest:
+ m = self.file_re.match(e)
+ if m:
+ attr = ""
+ name = m.group(1)
+ node = m.group(2)
+ if self.attr_execute_re.match(e):
+ attr += "x"
+ self.files[name] = (node, attr)
+ m = self.dir_re.match(e)
+ if m:
+ self.dirs[m.group(1)] = True
+
+ def mtnisfile(self, name, rev):
+ # a non-file could be a directory or a deleted or renamed file
+ self.mtnloadmanifest(rev)
+ return name in self.files
+
+ def mtnisdir(self, name, rev):
+ self.mtnloadmanifest(rev)
+ return name in self.dirs
+
+ def mtngetcerts(self, rev):
+ certs = {"author":"<missing>", "date":"<missing>",
+ "changelog":"<missing>", "branch":"<missing>"}
+ cert_list = self.mtnrun("certs", rev).split('\n\n key "')
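+ # 'automate certs' emits basic_io stanzas like (illustrative):
+ # key "joe@example.com"
+ # signature "ok"
+ # name "author"
+ # value "joe@example.com"
+ # trust "trusted"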
+ for e in cert_list:
+ m = self.cert_re.match(e)
+ if m:
+ name, value = m.groups()
+ value = value.replace(r'\"', '"')
+ value = value.replace(r'\\', '\\')
+ certs[name] = value
+ # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
+ # and all times are stored in UTC
+ certs["date"] = certs["date"].split('.')[0] + " UTC"
+ return certs
+
+ # implement the converter_source interface:
+
+ def getheads(self):
+ if not self.rev:
+ return self.mtnrun("leaves").splitlines()
+ else:
+ return [self.rev]
+
+ def getchanges(self, rev):
+ #revision = self.mtncmd("get_revision %s" % rev).split("\n\n")
+ revision = self.mtnrun("get_revision", rev).split("\n\n")
+ files = {}
+ ignoremove = {}
+ renameddirs = []
+ copies = {}
+ for e in revision:
+ m = self.add_file_re.match(e)
+ if m:
+ files[m.group(1)] = rev
+ ignoremove[m.group(1)] = rev
+ m = self.patch_re.match(e)
+ if m:
+ files[m.group(1)] = rev
+ # Delete/rename is handled later when the convert engine
+ # discovers an IOError exception from getfile,
+ # but only if we add the "from" file to the list of changes.
+ m = self.delete_re.match(e)
+ if m:
+ files[m.group(1)] = rev
+ m = self.rename_re.match(e)
+ if m:
+ toname = m.group(2)
+ fromname = m.group(1)
+ if self.mtnisfile(toname, rev):
+ ignoremove[toname] = 1
+ copies[toname] = fromname
+ files[toname] = rev
+ files[fromname] = rev
+ elif self.mtnisdir(toname, rev):
+ renameddirs.append((fromname, toname))
+
+ # Directory renames can be handled only once we have recorded
+ # all new files
+ for fromdir, todir in renameddirs:
+ renamed = {}
+ for tofile in self.files:
+ if tofile in ignoremove:
+ continue
+ if tofile.startswith(todir + '/'):
+ renamed[tofile] = fromdir + tofile[len(todir):]
+ # Avoid chained moves like:
+ # d1(/a) => d3/d1(/a)
+ # d2 => d3
+ ignoremove[tofile] = 1
+ for tofile, fromfile in renamed.items():
+ self.ui.debug(_("copying file in renamed directory "
+ "from '%s' to '%s'")
+ % (fromfile, tofile), '\n')
+ files[tofile] = rev
+ copies[tofile] = fromfile
+ for fromfile in renamed.values():
+ files[fromfile] = rev
+
+ return (files.items(), copies)
+
+ def getmode(self, name, rev):
+ self.mtnloadmanifest(rev)
+ node, attr = self.files.get(name, (None, ""))
+ return attr
+
+ def getfile(self, name, rev):
+ if not self.mtnisfile(name, rev):
+ raise IOError() # file was deleted or renamed
+ try:
+ return self.mtnrun("get_file_of", name, r=rev)
+ except:
+ raise IOError() # file was deleted or renamed
+
+ def getcommit(self, rev):
+ certs = self.mtngetcerts(rev)
+ return commit(
+ author=certs["author"],
+ date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
+ desc=certs["changelog"],
+ rev=rev,
+ parents=self.mtnrun("parents", rev).splitlines(),
+ branch=certs["branch"])
+
+ def gettags(self):
+ tags = {}
+ for e in self.mtnrun("tags").split("\n\n"):
+ m = self.tag_re.match(e)
+ if m:
+ tags[m.group(1)] = m.group(2)
+ return tags
+
+ def getchangedfiles(self, rev, i):
+ # This function is only needed to support --filemap
+ # ... and we don't support that
+ raise NotImplementedError()
diff --git a/sys/src/cmd/hg/hgext/convert/p4.py b/sys/src/cmd/hg/hgext/convert/p4.py
new file mode 100644
index 000000000..d65867126
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/p4.py
@@ -0,0 +1,205 @@
+#
+# Perforce source for convert extension.
+#
+# Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+
+from mercurial import util
+from mercurial.i18n import _
+
+from common import commit, converter_source, checktool, NoRepo
+import marshal
+import re
+
+def loaditer(f):
+ "Yield the dictionary objects generated by p4"
+ try:
+ while True:
+ d = marshal.load(f)
+ if not d:
+ break
+ yield d
+ except EOFError:
+ pass
+
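+# Illustrative use of loaditer (not from the original source; assumes a
+# configured p4 client, and the depot path below is hypothetical):
+#   for d in loaditer(util.popen('p4 -G changes -s submitted "//depot/..."')):
+#       print d.get("change")
+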
+class p4_source(converter_source):
+ def __init__(self, ui, path, rev=None):
+ super(p4_source, self).__init__(ui, path, rev=rev)
+
+ if "/" in path and not path.startswith('//'):
+ raise NoRepo('%s does not look like a P4 repo' % path)
+
+ checktool('p4', abort=False)
+
+ self.p4changes = {}
+ self.heads = {}
+ self.changeset = {}
+ self.files = {}
+ self.tags = {}
+ self.lastbranch = {}
+ self.parent = {}
+ self.encoding = "latin_1"
+ self.depotname = {} # mapping from local name to depot name
+ self.modecache = {}
+        self.re_type = re.compile(r"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)(\+\w+)?$")
+        self.re_keywords = re.compile(r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author):[^$\n]*\$")
+        self.re_keywords_old = re.compile(r"\$(Id|Header):[^$\n]*\$")
+
+ self._parse(ui, path)
+
+ def _parse_view(self, path):
+ "Read changes affecting the path"
+ cmd = 'p4 -G changes -s submitted "%s"' % path
+ stdout = util.popen(cmd)
+ for d in loaditer(stdout):
+ c = d.get("change", None)
+ if c:
+ self.p4changes[c] = True
+
+ def _parse(self, ui, path):
+ "Prepare list of P4 filenames and revisions to import"
+ ui.status(_('reading p4 views\n'))
+
+ # read client spec or view
+ if "/" in path:
+ self._parse_view(path)
+ if path.startswith("//") and path.endswith("/..."):
+ views = {path[:-3]:""}
+ else:
+ views = {"//": ""}
+ else:
+ cmd = 'p4 -G client -o "%s"' % path
+ clientspec = marshal.load(util.popen(cmd))
+
+ views = {}
+ for client in clientspec:
+ if client.startswith("View"):
+ sview, cview = clientspec[client].split()
+ self._parse_view(sview)
+ if sview.endswith("...") and cview.endswith("..."):
+ sview = sview[:-3]
+ cview = cview[:-3]
+ cview = cview[2:]
+ cview = cview[cview.find("/") + 1:]
+ views[sview] = cview
+
+ # list of changes that affect our source files
+ self.p4changes = self.p4changes.keys()
+ self.p4changes.sort(key=int)
+
+ # list with depot pathnames, longest first
+ vieworder = views.keys()
+ vieworder.sort(key=len, reverse=True)
+
+ # handle revision limiting
+ startrev = self.ui.config('convert', 'p4.startrev', default=0)
+ self.p4changes = [x for x in self.p4changes
+ if ((not startrev or int(x) >= int(startrev)) and
+ (not self.rev or int(x) <= int(self.rev)))]
+
+ # now read the full changelists to get the list of file revisions
+ ui.status(_('collecting p4 changelists\n'))
+ lastid = None
+ for change in self.p4changes:
+ cmd = "p4 -G describe %s" % change
+ stdout = util.popen(cmd)
+ d = marshal.load(stdout)
+
+ desc = self.recode(d["desc"])
+ shortdesc = desc.split("\n", 1)[0]
+ t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
+ ui.status(util.ellipsis(t, 80) + '\n')
+
+ if lastid:
+ parents = [lastid]
+ else:
+ parents = []
+
+ date = (int(d["time"]), 0) # timezone not set
+ c = commit(author=self.recode(d["user"]), date=util.datestr(date),
+ parents=parents, desc=desc, branch='', extra={"p4": change})
+
+ files = []
+ i = 0
+ while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
+ oldname = d["depotFile%d" % i]
+ filename = None
+ for v in vieworder:
+ if oldname.startswith(v):
+ filename = views[v] + oldname[len(v):]
+ break
+ if filename:
+ files.append((filename, d["rev%d" % i]))
+ self.depotname[filename] = oldname
+ i += 1
+ self.changeset[change] = c
+ self.files[change] = files
+ lastid = change
+
+ if lastid:
+ self.heads = [lastid]
+
+ def getheads(self):
+ return self.heads
+
+ def getfile(self, name, rev):
+ cmd = 'p4 -G print "%s#%s"' % (self.depotname[name], rev)
+ stdout = util.popen(cmd)
+
+ mode = None
+ contents = ""
+ keywords = None
+
+ for d in loaditer(stdout):
+ code = d["code"]
+ data = d.get("data")
+
+ if code == "error":
+ raise IOError(d["generic"], data)
+
+ elif code == "stat":
+ p4type = self.re_type.match(d["type"])
+ if p4type:
+ mode = ""
+ flags = (p4type.group(1) or "") + (p4type.group(3) or "")
+ if "x" in flags:
+ mode = "x"
+ if p4type.group(2) == "symlink":
+ mode = "l"
+ if "ko" in flags:
+ keywords = self.re_keywords_old
+ elif "k" in flags:
+ keywords = self.re_keywords
+
+ elif code == "text" or code == "binary":
+ contents += data
+
+ if mode is None:
+ raise IOError(0, "bad stat")
+
+ self.modecache[(name, rev)] = mode
+
+ if keywords:
+ contents = keywords.sub("$\\1$", contents)
+ if mode == "l" and contents.endswith("\n"):
+ contents = contents[:-1]
+
+ return contents
+
+ def getmode(self, name, rev):
+ return self.modecache[(name, rev)]
+
+ def getchanges(self, rev):
+ return self.files[rev], {}
+
+ def getcommit(self, rev):
+ return self.changeset[rev]
+
+ def gettags(self):
+ return self.tags
+
+ def getchangedfiles(self, rev, i):
+ return sorted([x[0] for x in self.files[rev]])
diff --git a/sys/src/cmd/hg/hgext/convert/subversion.py b/sys/src/cmd/hg/hgext/convert/subversion.py
new file mode 100644
index 000000000..5a0367485
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/subversion.py
@@ -0,0 +1,1136 @@
+# Subversion 1.4/1.5 Python API backend
+#
+# Copyright(C) 2007 Daniel Holth et al
+
+import os
+import re
+import sys
+import cPickle as pickle
+import tempfile
+import urllib
+
+from mercurial import strutil, util, encoding
+from mercurial.i18n import _
+
+# Subversion stuff. Works best with very recent Python SVN bindings
+# e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
+# these bindings.
+
+from cStringIO import StringIO
+
+from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
+from common import commandline, converter_source, converter_sink, mapfile
+
+try:
+ from svn.core import SubversionException, Pool
+ import svn
+ import svn.client
+ import svn.core
+ import svn.ra
+ import svn.delta
+ import transport
+ import warnings
+ warnings.filterwarnings('ignore',
+ module='svn.core',
+ category=DeprecationWarning)
+
+except ImportError:
+ pass
+
+class SvnPathNotFound(Exception):
+ pass
+
+def geturl(path):
+ try:
+ return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
+ except SubversionException:
+ pass
+ if os.path.isdir(path):
+ path = os.path.normpath(os.path.abspath(path))
+ if os.name == 'nt':
+ path = '/' + util.normpath(path)
+ # Module URL is later compared with the repository URL returned
+ # by svn API, which is UTF-8.
+ path = encoding.tolocal(path)
+ return 'file://%s' % urllib.quote(path)
+ return path
+
+def optrev(number):
+ optrev = svn.core.svn_opt_revision_t()
+ optrev.kind = svn.core.svn_opt_revision_number
+ optrev.value.number = number
+ return optrev
+
+class changedpath(object):
+ def __init__(self, p):
+ self.copyfrom_path = p.copyfrom_path
+ self.copyfrom_rev = p.copyfrom_rev
+ self.action = p.action
+
+def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
+ strict_node_history=False):
+ protocol = -1
+ def receiver(orig_paths, revnum, author, date, message, pool):
+ if orig_paths is not None:
+ for k, v in orig_paths.iteritems():
+ orig_paths[k] = changedpath(v)
+ pickle.dump((orig_paths, revnum, author, date, message),
+ fp, protocol)
+
+ try:
+ # Use an ra of our own so that our parent can consume
+ # our results without confusing the server.
+ t = transport.SvnRaTransport(url=url)
+ svn.ra.get_log(t.ra, paths, start, end, limit,
+ discover_changed_paths,
+ strict_node_history,
+ receiver)
+ except SubversionException, (inst, num):
+ pickle.dump(num, fp, protocol)
+ except IOError:
+ # Caller may interrupt the iteration
+ pickle.dump(None, fp, protocol)
+ else:
+ pickle.dump(None, fp, protocol)
+ fp.close()
+    # With a large history, the cleanup process goes crazy and suddenly
+    # consumes a *huge* amount of memory. Since the output file has been
+    # closed, there is no need for clean termination.
+ os._exit(0)
+
+def debugsvnlog(ui, **opts):
+ """Fetch SVN log in a subprocess and channel them back to parent to
+ avoid memory collection issues.
+ """
+ util.set_binary(sys.stdin)
+ util.set_binary(sys.stdout)
+ args = decodeargs(sys.stdin.read())
+ get_log_child(sys.stdout, *args)
+
+class logstream(object):
+ """Interruptible revision log iterator."""
+ def __init__(self, stdout):
+ self._stdout = stdout
+
+ def __iter__(self):
+ while True:
+ entry = pickle.load(self._stdout)
+ try:
+ orig_paths, revnum, author, date, message = entry
+            except (TypeError, ValueError):
+ if entry is None:
+ break
+ raise SubversionException("child raised exception", entry)
+ yield entry
+
+ def close(self):
+ if self._stdout:
+ self._stdout.close()
+ self._stdout = None
+
+
+# Check to see if the given path is a local Subversion repo. Verify this by
+# looking for several svn-specific files and directories in the given
+# directory.
+def filecheck(path, proto):
+ for x in ('locks', 'hooks', 'format', 'db', ):
+ if not os.path.exists(os.path.join(path, x)):
+ return False
+ return True
+
+# Check to see if a given path is the root of an svn repo over http. We verify
+# this by requesting a version-controlled URL we know can't exist and looking
+# for the svn-specific "not found" XML.
+def httpcheck(path, proto):
+ return ('<m:human-readable errcode="160013">' in
+ urllib.urlopen('%s://%s/!svn/ver/0/.svn' % (proto, path)).read())
+
+protomap = {'http': httpcheck,
+ 'https': httpcheck,
+ 'file': filecheck,
+ }
+def issvnurl(url):
+ try:
+ proto, path = url.split('://', 1)
+ path = urllib.url2pathname(path)
+ except ValueError:
+ proto = 'file'
+ path = os.path.abspath(url)
+ path = path.replace(os.sep, '/')
+ check = protomap.get(proto, lambda p, p2: False)
+ while '/' in path:
+ if check(path, proto):
+ return True
+ path = path.rsplit('/', 1)[0]
+ return False
+
+# SVN conversion code stolen from bzr-svn and tailor
+#
+# Subversion looks like a versioned filesystem; branch structures
+# are defined by conventions and not enforced by the tool. First,
+# we define the potential branches (modules) as "trunk" and "branches"
+# children directories. Revisions are then identified by their
+# module and revision number (and a repository identifier).
+#
+# The revision graph is really a tree (or a forest). By default, a
+# revision parent is the previous revision in the same module. If the
+# module directory is copied/moved from another module then the
+# revision is the module root and its parent the source revision in
+# the parent module. A revision has at most one parent.
+#
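+# For example (illustrative values only), revision 42 of module "/trunk"
+# in a repository whose uuid is "deadbeef" would be identified as:
+#   svn:deadbeef/trunk@42
+# (see svn_source.revid() below for the exact encoding)
+#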
+class svn_source(converter_source):
+ def __init__(self, ui, url, rev=None):
+ super(svn_source, self).__init__(ui, url, rev=rev)
+
+ if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
+ (os.path.exists(url) and
+ os.path.exists(os.path.join(url, '.svn'))) or
+ issvnurl(url)):
+ raise NoRepo("%s does not look like a Subversion repo" % url)
+
+ try:
+ SubversionException
+ except NameError:
+ raise MissingTool(_('Subversion python bindings could not be loaded'))
+
+ try:
+ version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
+ if version < (1, 4):
+ raise MissingTool(_('Subversion python bindings %d.%d found, '
+ '1.4 or later required') % version)
+ except AttributeError:
+ raise MissingTool(_('Subversion python bindings are too old, 1.4 '
+ 'or later required'))
+
+ self.lastrevs = {}
+
+ latest = None
+ try:
+ # Support file://path@rev syntax. Useful e.g. to convert
+ # deleted branches.
+ at = url.rfind('@')
+ if at >= 0:
+ latest = int(url[at+1:])
+ url = url[:at]
+ except ValueError:
+ pass
+ self.url = geturl(url)
+ self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
+ try:
+ self.transport = transport.SvnRaTransport(url=self.url)
+ self.ra = self.transport.ra
+ self.ctx = self.transport.client
+ self.baseurl = svn.ra.get_repos_root(self.ra)
+ # Module is either empty or a repository path starting with
+ # a slash and not ending with a slash.
+ self.module = urllib.unquote(self.url[len(self.baseurl):])
+ self.prevmodule = None
+ self.rootmodule = self.module
+ self.commits = {}
+ self.paths = {}
+ self.uuid = svn.ra.get_uuid(self.ra)
+ except SubversionException:
+ ui.traceback()
+ raise NoRepo("%s does not look like a Subversion repo" % self.url)
+
+ if rev:
+ try:
+ latest = int(rev)
+ except ValueError:
+ raise util.Abort(_('svn: revision %s is not an integer') % rev)
+
+ self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
+ try:
+ self.startrev = int(self.startrev)
+ if self.startrev < 0:
+ self.startrev = 0
+ except ValueError:
+ raise util.Abort(_('svn: start revision %s is not an integer')
+ % self.startrev)
+
+ self.head = self.latest(self.module, latest)
+ if not self.head:
+ raise util.Abort(_('no revision found in module %s')
+ % self.module)
+ self.last_changed = self.revnum(self.head)
+
+ self._changescache = None
+
+ if os.path.exists(os.path.join(url, '.svn/entries')):
+ self.wc = url
+ else:
+ self.wc = None
+ self.convertfp = None
+
+ def setrevmap(self, revmap):
+ lastrevs = {}
+ for revid in revmap.iterkeys():
+ uuid, module, revnum = self.revsplit(revid)
+ lastrevnum = lastrevs.setdefault(module, revnum)
+ if revnum > lastrevnum:
+ lastrevs[module] = revnum
+ self.lastrevs = lastrevs
+
+ def exists(self, path, optrev):
+ try:
+ svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
+ optrev, False, self.ctx)
+ return True
+ except SubversionException:
+ return False
+
+ def getheads(self):
+
+ def isdir(path, revnum):
+ kind = self._checkpath(path, revnum)
+ return kind == svn.core.svn_node_dir
+
+ def getcfgpath(name, rev):
+ cfgpath = self.ui.config('convert', 'svn.' + name)
+ if cfgpath is not None and cfgpath.strip() == '':
+ return None
+ path = (cfgpath or name).strip('/')
+ if not self.exists(path, rev):
+ if cfgpath:
+ raise util.Abort(_('expected %s to be at %r, but not found')
+ % (name, path))
+ return None
+ self.ui.note(_('found %s at %r\n') % (name, path))
+ return path
+
+ rev = optrev(self.last_changed)
+ oldmodule = ''
+ trunk = getcfgpath('trunk', rev)
+ self.tags = getcfgpath('tags', rev)
+ branches = getcfgpath('branches', rev)
+
+ # If the project has a trunk or branches, we will extract heads
+ # from them. We keep the project root otherwise.
+ if trunk:
+ oldmodule = self.module or ''
+ self.module += '/' + trunk
+ self.head = self.latest(self.module, self.last_changed)
+ if not self.head:
+ raise util.Abort(_('no revision found in module %s')
+ % self.module)
+
+ # First head in the list is the module's head
+ self.heads = [self.head]
+ if self.tags is not None:
+            self.tags = '%s/%s' % (oldmodule, (self.tags or 'tags'))
+
+ # Check if branches bring a few more heads to the list
+ if branches:
+ rpath = self.url.strip('/')
+ branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
+ rev, False, self.ctx)
+ for branch in branchnames.keys():
+ module = '%s/%s/%s' % (oldmodule, branches, branch)
+ if not isdir(module, self.last_changed):
+ continue
+ brevid = self.latest(module, self.last_changed)
+ if not brevid:
+ self.ui.note(_('ignoring empty branch %s\n') % branch)
+ continue
+ self.ui.note(_('found branch %s at %d\n') %
+ (branch, self.revnum(brevid)))
+ self.heads.append(brevid)
+
+ if self.startrev and self.heads:
+ if len(self.heads) > 1:
+ raise util.Abort(_('svn: start revision is not supported '
+ 'with more than one branch'))
+ revnum = self.revnum(self.heads[0])
+ if revnum < self.startrev:
+ raise util.Abort(_('svn: no revision found after start revision %d')
+ % self.startrev)
+
+ return self.heads
+
+ def getfile(self, file, rev):
+ data, mode = self._getfile(file, rev)
+ self.modecache[(file, rev)] = mode
+ return data
+
+ def getmode(self, file, rev):
+ return self.modecache[(file, rev)]
+
+ def getchanges(self, rev):
+ if self._changescache and self._changescache[0] == rev:
+ return self._changescache[1]
+ self._changescache = None
+ self.modecache = {}
+ (paths, parents) = self.paths[rev]
+ if parents:
+ files, copies = self.expandpaths(rev, paths, parents)
+ else:
+ # Perform a full checkout on roots
+ uuid, module, revnum = self.revsplit(rev)
+ entries = svn.client.ls(self.baseurl + urllib.quote(module),
+ optrev(revnum), True, self.ctx)
+ files = [n for n,e in entries.iteritems()
+ if e.kind == svn.core.svn_node_file]
+ copies = {}
+
+ files.sort()
+ files = zip(files, [rev] * len(files))
+
+ # caller caches the result, so free it here to release memory
+ del self.paths[rev]
+ return (files, copies)
+
+ def getchangedfiles(self, rev, i):
+ changes = self.getchanges(rev)
+ self._changescache = (rev, changes)
+ return [f[0] for f in changes[0]]
+
+ def getcommit(self, rev):
+ if rev not in self.commits:
+ uuid, module, revnum = self.revsplit(rev)
+ self.module = module
+ self.reparent(module)
+ # We assume that:
+ # - requests for revisions after "stop" come from the
+ # revision graph backward traversal. Cache all of them
+ # down to stop, they will be used eventually.
+ # - requests for revisions before "stop" come to get
+ # isolated branches parents. Just fetch what is needed.
+ stop = self.lastrevs.get(module, 0)
+ if revnum < stop:
+ stop = revnum + 1
+ self._fetch_revisions(revnum, stop)
+ commit = self.commits[rev]
+ # caller caches the result, so free it here to release memory
+ del self.commits[rev]
+ return commit
+
+ def gettags(self):
+ tags = {}
+ if self.tags is None:
+ return tags
+
+ # svn tags are just a convention, project branches left in a
+ # 'tags' directory. There is no other relationship than
+ # ancestry, which is expensive to discover and makes them hard
+ # to update incrementally. Worse, past revisions may be
+ # referenced by tags far away in the future, requiring a deep
+ # history traversal on every calculation. Current code
+ # performs a single backward traversal, tracking moves within
+ # the tags directory (tag renaming) and recording a new tag
+        # every time a project is copied from outside the tags
+        # directory. It also lists deleted tags; this behaviour may
+        # change in the future.
+ pendings = []
+ tagspath = self.tags
+ start = svn.ra.get_latest_revnum(self.ra)
+ try:
+ for entry in self._getlog([self.tags], start, self.startrev):
+ origpaths, revnum, author, date, message = entry
+ copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
+ in origpaths.iteritems() if e.copyfrom_path]
+ # Apply moves/copies from more specific to general
+ copies.sort(reverse=True)
+
+ srctagspath = tagspath
+ if copies and copies[-1][2] == tagspath:
+ # Track tags directory moves
+ srctagspath = copies.pop()[0]
+
+ for source, sourcerev, dest in copies:
+ if not dest.startswith(tagspath + '/'):
+ continue
+ for tag in pendings:
+ if tag[0].startswith(dest):
+ tagpath = source + tag[0][len(dest):]
+ tag[:2] = [tagpath, sourcerev]
+ break
+ else:
+ pendings.append([source, sourcerev, dest])
+
+ # Filter out tags with children coming from different
+ # parts of the repository like:
+ # /tags/tag.1 (from /trunk:10)
+ # /tags/tag.1/foo (from /branches/foo:12)
+                # Here /tags/tag.1 is discarded as well as its children.
+ # It happens with tools like cvs2svn. Such tags cannot
+ # be represented in mercurial.
+ addeds = dict((p, e.copyfrom_path) for p, e
+ in origpaths.iteritems()
+ if e.action == 'A' and e.copyfrom_path)
+ badroots = set()
+ for destroot in addeds:
+ for source, sourcerev, dest in pendings:
+ if (not dest.startswith(destroot + '/')
+ or source.startswith(addeds[destroot] + '/')):
+ continue
+ badroots.add(destroot)
+ break
+
+ for badroot in badroots:
+ pendings = [p for p in pendings if p[2] != badroot
+ and not p[2].startswith(badroot + '/')]
+
+ # Tell tag renamings from tag creations
+ remainings = []
+ for source, sourcerev, dest in pendings:
+ tagname = dest.split('/')[-1]
+ if source.startswith(srctagspath):
+ remainings.append([source, sourcerev, tagname])
+ continue
+ if tagname in tags:
+ # Keep the latest tag value
+ continue
+                    # The from revision may be fake; get one with changes
+ try:
+ tagid = self.latest(source, sourcerev)
+ if tagid and tagname not in tags:
+ tags[tagname] = tagid
+ except SvnPathNotFound:
+ # It happens when we are following directories
+ # we assumed were copied with their parents
+ # but were really created in the tag
+ # directory.
+ pass
+ pendings = remainings
+ tagspath = srctagspath
+
+ except SubversionException:
+ self.ui.note(_('no tags found at revision %d\n') % start)
+ return tags
+
+ def converted(self, rev, destrev):
+ if not self.wc:
+ return
+ if self.convertfp is None:
+ self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
+ 'a')
+ self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
+ self.convertfp.flush()
+
+ def revid(self, revnum, module=None):
+ return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
+
+ def revnum(self, rev):
+ return int(rev.split('@')[-1])
+
+ def revsplit(self, rev):
+ url, revnum = rev.rsplit('@', 1)
+ revnum = int(revnum)
+ parts = url.split('/', 1)
+ uuid = parts.pop(0)[4:]
+ mod = ''
+ if parts:
+ mod = '/' + parts[0]
+ return uuid, mod, revnum
+
+ def latest(self, path, stop=0):
+ """Find the latest revid affecting path, up to stop. It may return
+ a revision in a different module, since a branch may be moved without
+ a change being reported. Return None if computed module does not
+ belong to rootmodule subtree.
+ """
+ if not path.startswith(self.rootmodule):
+ # Requests on foreign branches may be forbidden at server level
+ self.ui.debug(_('ignoring foreign branch %r\n') % path)
+ return None
+
+ if not stop:
+ stop = svn.ra.get_latest_revnum(self.ra)
+ try:
+ prevmodule = self.reparent('')
+ dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
+ self.reparent(prevmodule)
+ except SubversionException:
+ dirent = None
+ if not dirent:
+ raise SvnPathNotFound(_('%s not found up to revision %d') % (path, stop))
+
+ # stat() gives us the previous revision on this line of
+ # development, but it might be in *another module*. Fetch the
+ # log and detect renames down to the latest revision.
+ stream = self._getlog([path], stop, dirent.created_rev)
+ try:
+ for entry in stream:
+ paths, revnum, author, date, message = entry
+ if revnum <= dirent.created_rev:
+ break
+
+ for p in paths:
+ if not path.startswith(p) or not paths[p].copyfrom_path:
+ continue
+ newpath = paths[p].copyfrom_path + path[len(p):]
+ self.ui.debug(_("branch renamed from %s to %s at %d\n") %
+ (path, newpath, revnum))
+ path = newpath
+ break
+ finally:
+ stream.close()
+
+ if not path.startswith(self.rootmodule):
+ self.ui.debug(_('ignoring foreign branch %r\n') % path)
+ return None
+ return self.revid(dirent.created_rev, path)
+
+ def reparent(self, module):
+ """Reparent the svn transport and return the previous parent."""
+ if self.prevmodule == module:
+ return module
+ svnurl = self.baseurl + urllib.quote(module)
+ prevmodule = self.prevmodule
+ if prevmodule is None:
+ prevmodule = ''
+ self.ui.debug(_("reparent to %s\n") % svnurl)
+ svn.ra.reparent(self.ra, svnurl)
+ self.prevmodule = module
+ return prevmodule
+
+ def expandpaths(self, rev, paths, parents):
+ entries = []
+ # Map of entrypath, revision for finding source of deleted
+ # revisions.
+ copyfrom = {}
+ copies = {}
+
+ new_module, revnum = self.revsplit(rev)[1:]
+ if new_module != self.module:
+ self.module = new_module
+ self.reparent(self.module)
+
+ for path, ent in paths:
+ entrypath = self.getrelpath(path)
+
+ kind = self._checkpath(entrypath, revnum)
+ if kind == svn.core.svn_node_file:
+ entries.append(self.recode(entrypath))
+ if not ent.copyfrom_path or not parents:
+ continue
+ # Copy sources not in parent revisions cannot be
+                # represented; ignore their origin for now
+ pmodule, prevnum = self.revsplit(parents[0])[1:]
+ if ent.copyfrom_rev < prevnum:
+ continue
+ copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
+ if not copyfrom_path:
+ continue
+ self.ui.debug(_("copied to %s from %s@%s\n") %
+ (entrypath, copyfrom_path, ent.copyfrom_rev))
+ copies[self.recode(entrypath)] = self.recode(copyfrom_path)
+ elif kind == 0: # gone, but had better be a deleted *file*
+ self.ui.debug(_("gone from %s\n") % ent.copyfrom_rev)
+ pmodule, prevnum = self.revsplit(parents[0])[1:]
+ parentpath = pmodule + "/" + entrypath
+ self.ui.debug(_("entry %s\n") % parentpath)
+
+ # We can avoid the reparent calls if the module has
+                # not changed, but it is probably not worth the pain.
+ prevmodule = self.reparent('')
+ fromkind = svn.ra.check_path(self.ra, parentpath.strip('/'), prevnum)
+ self.reparent(prevmodule)
+
+ if fromkind == svn.core.svn_node_file:
+ entries.append(self.recode(entrypath))
+ elif fromkind == svn.core.svn_node_dir:
+ if ent.action == 'C':
+ children = self._find_children(path, prevnum)
+ else:
+ oroot = parentpath.strip('/')
+ nroot = path.strip('/')
+ children = self._find_children(oroot, prevnum)
+                        children = [s.replace(oroot, nroot) for s in children]
+
+ for child in children:
+ childpath = self.getrelpath("/" + child, pmodule)
+ if not childpath:
+ continue
+ if childpath in copies:
+ del copies[childpath]
+ entries.append(childpath)
+ else:
+                    self.ui.debug(_('unknown path in revision %d: %s\n')
+                                  % (revnum, path))
+ elif kind == svn.core.svn_node_dir:
+ # If the directory just had a prop change,
+ # then we shouldn't need to look for its children.
+ if ent.action == 'M':
+ continue
+
+ children = sorted(self._find_children(path, revnum))
+ for child in children:
+ # Can we move a child directory and its
+ # parent in the same commit? (probably can). Could
+ # cause problems if instead of revnum -1,
+ # we have to look in (copyfrom_path, revnum - 1)
+ entrypath = self.getrelpath("/" + child)
+ if entrypath:
+ # Need to filter out directories here...
+ kind = self._checkpath(entrypath, revnum)
+ if kind != svn.core.svn_node_dir:
+ entries.append(self.recode(entrypath))
+
+ # Handle directory copies
+ if not ent.copyfrom_path or not parents:
+ continue
+ # Copy sources not in parent revisions cannot be
+                # represented; ignore their origin for now
+ pmodule, prevnum = self.revsplit(parents[0])[1:]
+ if ent.copyfrom_rev < prevnum:
+ continue
+ copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
+ if not copyfrompath:
+ continue
+ copyfrom[path] = ent
+ self.ui.debug(_("mark %s came from %s:%d\n")
+ % (path, copyfrompath, ent.copyfrom_rev))
+ children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
+ children.sort()
+ for child in children:
+ entrypath = self.getrelpath("/" + child, pmodule)
+ if not entrypath:
+ continue
+ copytopath = path + entrypath[len(copyfrompath):]
+ copytopath = self.getrelpath(copytopath)
+ copies[self.recode(copytopath)] = self.recode(entrypath)
+
+ return (list(set(entries)), copies)
+
+ def _fetch_revisions(self, from_revnum, to_revnum):
+ if from_revnum < to_revnum:
+ from_revnum, to_revnum = to_revnum, from_revnum
+
+ self.child_cset = None
+
+ def parselogentry(orig_paths, revnum, author, date, message):
+ """Return the parsed commit object or None, and True if
+ the revision is a branch root.
+ """
+ self.ui.debug(_("parsing revision %d (%d changes)\n") %
+ (revnum, len(orig_paths)))
+
+ branched = False
+ rev = self.revid(revnum)
+ # branch log might return entries for a parent we already have
+
+ if rev in self.commits or revnum < to_revnum:
+ return None, branched
+
+ parents = []
+ # check whether this revision is the start of a branch or part
+ # of a branch renaming
+ orig_paths = sorted(orig_paths.iteritems())
+ root_paths = [(p,e) for p,e in orig_paths if self.module.startswith(p)]
+ if root_paths:
+ path, ent = root_paths[-1]
+ if ent.copyfrom_path:
+ branched = True
+ newpath = ent.copyfrom_path + self.module[len(path):]
+ # ent.copyfrom_rev may not be the actual last revision
+ previd = self.latest(newpath, ent.copyfrom_rev)
+ if previd is not None:
+ prevmodule, prevnum = self.revsplit(previd)[1:]
+ if prevnum >= self.startrev:
+ parents = [previd]
+ self.ui.note(_('found parent of branch %s at %d: %s\n') %
+ (self.module, prevnum, prevmodule))
+ else:
+ self.ui.debug(_("no copyfrom path, don't know what to do.\n"))
+
+ paths = []
+ # filter out unrelated paths
+ for path, ent in orig_paths:
+ if self.getrelpath(path) is None:
+ continue
+ paths.append((path, ent))
+
+ # Example SVN datetime. Includes microseconds.
+ # ISO-8601 conformant
+ # '2007-01-04T17:35:00.902377Z'
+ date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
+
+ log = message and self.recode(message) or ''
+ author = author and self.recode(author) or ''
+ try:
+ branch = self.module.split("/")[-1]
+ if branch == 'trunk':
+ branch = ''
+ except IndexError:
+ branch = None
+
+ cset = commit(author=author,
+ date=util.datestr(date),
+ desc=log,
+ parents=parents,
+ branch=branch,
+ rev=rev)
+
+ self.commits[rev] = cset
+ # The parents list is *shared* among self.paths and the
+ # commit object. Both will be updated below.
+ self.paths[rev] = (paths, cset.parents)
+ if self.child_cset and not self.child_cset.parents:
+ self.child_cset.parents[:] = [rev]
+ self.child_cset = cset
+ return cset, branched
+
+ self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
+ (self.module, from_revnum, to_revnum))
+
+ try:
+ firstcset = None
+ lastonbranch = False
+ stream = self._getlog([self.module], from_revnum, to_revnum)
+ try:
+ for entry in stream:
+ paths, revnum, author, date, message = entry
+ if revnum < self.startrev:
+ lastonbranch = True
+ break
+ if not paths:
+ self.ui.debug(_('revision %d has no entries\n') % revnum)
+ continue
+ cset, lastonbranch = parselogentry(paths, revnum, author,
+ date, message)
+ if cset:
+ firstcset = cset
+ if lastonbranch:
+ break
+ finally:
+ stream.close()
+
+ if not lastonbranch and firstcset and not firstcset.parents:
+ # The first revision of the sequence (the last fetched one)
+ # has invalid parents if not a branch root. Find the parent
+ # revision now, if any.
+ try:
+ firstrevnum = self.revnum(firstcset.rev)
+ if firstrevnum > 1:
+ latest = self.latest(self.module, firstrevnum - 1)
+ if latest:
+ firstcset.parents.append(latest)
+ except SvnPathNotFound:
+ pass
+ except SubversionException, (inst, num):
+ if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
+ raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
+ raise
+
+ def _getfile(self, file, rev):
+ # TODO: ra.get_file transmits the whole file instead of diffs.
+ mode = ''
+ try:
+ new_module, revnum = self.revsplit(rev)[1:]
+ if self.module != new_module:
+ self.module = new_module
+ self.reparent(self.module)
+ io = StringIO()
+ info = svn.ra.get_file(self.ra, file, revnum, io)
+ data = io.getvalue()
+            # ra.get_file() seems to keep a reference on the input buffer,
+            # preventing collection. Release it explicitly.
+ io.close()
+ if isinstance(info, list):
+ info = info[-1]
+ mode = ("svn:executable" in info) and 'x' or ''
+ mode = ("svn:special" in info) and 'l' or mode
+ except SubversionException, e:
+ notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
+ svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
+ if e.apr_err in notfound: # File not found
+ raise IOError()
+ raise
+ if mode == 'l':
+ link_prefix = "link "
+ if data.startswith(link_prefix):
+ data = data[len(link_prefix):]
+ return data, mode
+
+ def _find_children(self, path, revnum):
+ path = path.strip('/')
+ pool = Pool()
+ rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
+ return ['%s/%s' % (path, x) for x in
+ svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
+
+ def getrelpath(self, path, module=None):
+ if module is None:
+ module = self.module
+ # Given the repository url of this wc, say
+ # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
+ # extract the "entry" portion (a relative path) from what
+ # svn log --xml says, ie
+ # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
+ # that is to say "tests/PloneTestCase.py"
+ if path.startswith(module):
+ relative = path.rstrip('/')[len(module):]
+ if relative.startswith('/'):
+ return relative[1:]
+ elif relative == '':
+ return relative
+
+ # The path is outside our tracked tree...
+ self.ui.debug(_('%r is not under %r, ignoring\n') % (path, module))
+ return None
+
+ def _checkpath(self, path, revnum):
+        # ra.check_path does not like leading slashes very much; they lead
+        # to PROPFIND subversion errors
+ return svn.ra.check_path(self.ra, path.strip('/'), revnum)
+
+ def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
+ strict_node_history=False):
+        # Normalize path names; svn >= 1.5 only wants paths relative to
+        # the supplied URL
+ relpaths = []
+ for p in paths:
+ if not p.startswith('/'):
+ p = self.module + '/' + p
+ relpaths.append(p.strip('/'))
+ args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
+ strict_node_history]
+ arg = encodeargs(args)
+ hgexe = util.hgexecutable()
+ cmd = '%s debugsvnlog' % util.shellquote(hgexe)
+ stdin, stdout = util.popen2(cmd)
+ stdin.write(arg)
+ stdin.close()
+ return logstream(stdout)
+
+pre_revprop_change = '''#!/bin/sh
+
+REPOS="$1"
+REV="$2"
+USER="$3"
+PROPNAME="$4"
+ACTION="$5"
+
+if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
+if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
+if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
+
+echo "Changing prohibited revision property" >&2
+exit 1
+'''
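+# The pre-revprop-change hook above is installed into repositories created
+# by svn_sink.__init__ (below) so that convert can record the
+# hg:convert-rev and hg:convert-branch revision properties via 'propset'.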
+
+class svn_sink(converter_sink, commandline):
+ commit_re = re.compile(r'Committed revision (\d+).', re.M)
+
+ def prerun(self):
+ if self.wc:
+ os.chdir(self.wc)
+
+ def postrun(self):
+ if self.wc:
+ os.chdir(self.cwd)
+
+ def join(self, name):
+ return os.path.join(self.wc, '.svn', name)
+
+ def revmapfile(self):
+ return self.join('hg-shamap')
+
+ def authorfile(self):
+ return self.join('hg-authormap')
+
+ def __init__(self, ui, path):
+ converter_sink.__init__(self, ui, path)
+ commandline.__init__(self, ui, 'svn')
+ self.delete = []
+ self.setexec = []
+ self.delexec = []
+ self.copies = []
+ self.wc = None
+ self.cwd = os.getcwd()
+
+ path = os.path.realpath(path)
+
+ created = False
+ if os.path.isfile(os.path.join(path, '.svn', 'entries')):
+ self.wc = path
+ self.run0('update')
+ else:
+ wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
+
+ if os.path.isdir(os.path.dirname(path)):
+ if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
+ ui.status(_('initializing svn repo %r\n') %
+ os.path.basename(path))
+ commandline(ui, 'svnadmin').run0('create', path)
+ created = path
+ path = util.normpath(path)
+ if not path.startswith('/'):
+ path = '/' + path
+ path = 'file://' + path
+
+ ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
+ self.run0('checkout', path, wcpath)
+
+ self.wc = wcpath
+ self.opener = util.opener(self.wc)
+ self.wopener = util.opener(self.wc)
+ self.childmap = mapfile(ui, self.join('hg-childmap'))
+ self.is_exec = util.checkexec(self.wc) and util.is_exec or None
+
+ if created:
+ hook = os.path.join(created, 'hooks', 'pre-revprop-change')
+ fp = open(hook, 'w')
+ fp.write(pre_revprop_change)
+ fp.close()
+ util.set_flags(hook, False, True)
+
+ xport = transport.SvnRaTransport(url=geturl(path))
+ self.uuid = svn.ra.get_uuid(xport.ra)
+
+ def wjoin(self, *names):
+ return os.path.join(self.wc, *names)
+
+ def putfile(self, filename, flags, data):
+ if 'l' in flags:
+ self.wopener.symlink(data, filename)
+ else:
+ try:
+ if os.path.islink(self.wjoin(filename)):
+ os.unlink(filename)
+ except OSError:
+ pass
+ self.wopener(filename, 'w').write(data)
+
+ if self.is_exec:
+ was_exec = self.is_exec(self.wjoin(filename))
+ else:
+            # On filesystems that do not support the execute bit, there is
+            # no way to know whether it is set other than asking Subversion.
+            # Setting it systematically is just as expensive and much simpler.
+ was_exec = 'x' not in flags
+
+ util.set_flags(self.wjoin(filename), False, 'x' in flags)
+ if was_exec:
+ if 'x' not in flags:
+ self.delexec.append(filename)
+ else:
+ if 'x' in flags:
+ self.setexec.append(filename)
+
+ def _copyfile(self, source, dest):
+ # SVN's copy command pukes if the destination file exists, but
+ # our copyfile method expects to record a copy that has
+ # already occurred. Cross the semantic gap.
+ wdest = self.wjoin(dest)
+ exists = os.path.exists(wdest)
+ if exists:
+ fd, tempname = tempfile.mkstemp(
+ prefix='hg-copy-', dir=os.path.dirname(wdest))
+ os.close(fd)
+ os.unlink(tempname)
+ os.rename(wdest, tempname)
+ try:
+ self.run0('copy', source, dest)
+ finally:
+ if exists:
+ try:
+ os.unlink(wdest)
+ except OSError:
+ pass
+ os.rename(tempname, wdest)
+
+ def dirs_of(self, files):
+ dirs = set()
+ for f in files:
+ if os.path.isdir(self.wjoin(f)):
+ dirs.add(f)
+ for i in strutil.rfindall(f, '/'):
+ dirs.add(f[:i])
+ return dirs
+
+ def add_dirs(self, files):
+ add_dirs = [d for d in sorted(self.dirs_of(files))
+ if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
+ if add_dirs:
+ self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
+ return add_dirs
+
+ def add_files(self, files):
+ if files:
+ self.xargs(files, 'add', quiet=True)
+ return files
+
+ def tidy_dirs(self, names):
+ deleted = []
+ for d in sorted(self.dirs_of(names), reverse=True):
+ wd = self.wjoin(d)
+ if os.listdir(wd) == '.svn':
+ self.run0('delete', d)
+ deleted.append(d)
+ return deleted
+
+ def addchild(self, parent, child):
+ self.childmap[parent] = child
+
+ def revid(self, rev):
+ return u"svn:%s@%s" % (self.uuid, rev)
+
+ def putcommit(self, files, copies, parents, commit, source, revmap):
+ # Apply changes to working copy
+ for f, v in files:
+ try:
+ data = source.getfile(f, v)
+ except IOError:
+ self.delete.append(f)
+ else:
+ e = source.getmode(f, v)
+ self.putfile(f, e, data)
+ if f in copies:
+ self.copies.append([copies[f], f])
+ files = [f[0] for f in files]
+
+ for parent in parents:
+ try:
+ return self.revid(self.childmap[parent])
+ except KeyError:
+ pass
+ entries = set(self.delete)
+ files = frozenset(files)
+ entries.update(self.add_dirs(files.difference(entries)))
+ if self.copies:
+ for s, d in self.copies:
+ self._copyfile(s, d)
+ self.copies = []
+ if self.delete:
+ self.xargs(self.delete, 'delete')
+ self.delete = []
+ entries.update(self.add_files(files.difference(entries)))
+ entries.update(self.tidy_dirs(entries))
+ if self.delexec:
+ self.xargs(self.delexec, 'propdel', 'svn:executable')
+ self.delexec = []
+ if self.setexec:
+ self.xargs(self.setexec, 'propset', 'svn:executable', '*')
+ self.setexec = []
+
+ fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
+ fp = os.fdopen(fd, 'w')
+ fp.write(commit.desc)
+ fp.close()
+ try:
+ output = self.run0('commit',
+ username=util.shortuser(commit.author),
+ file=messagefile,
+ encoding='utf-8')
+ try:
+ rev = self.commit_re.search(output).group(1)
+ except AttributeError:
+ self.ui.warn(_('unexpected svn output:\n'))
+ self.ui.warn(output)
+ raise util.Abort(_('unable to cope with svn output'))
+ if commit.rev:
+ self.run('propset', 'hg:convert-rev', commit.rev,
+ revprop=True, revision=rev)
+ if commit.branch and commit.branch != 'default':
+ self.run('propset', 'hg:convert-branch', commit.branch,
+ revprop=True, revision=rev)
+ for parent in parents:
+ self.addchild(parent, rev)
+ return self.revid(rev)
+ finally:
+ os.unlink(messagefile)
+
+ def puttags(self, tags):
+ self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
diff --git a/sys/src/cmd/hg/hgext/convert/transport.py b/sys/src/cmd/hg/hgext/convert/transport.py
new file mode 100644
index 000000000..0d77cca4d
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/convert/transport.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
+# This is a stripped-down version of the original bzr-svn transport.py,
+# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+from svn.core import SubversionException, Pool
+import svn.ra
+import svn.client
+import svn.core
+
+# Some older versions of the Python bindings need to be
+# explicitly initialized. But what we want to do probably
+# won't work worth a darn against those libraries anyway!
+svn.ra.initialize()
+
+svn_config = svn.core.svn_config_get_config(None)
+
+
+def _create_auth_baton(pool):
+ """Create a Subversion authentication baton. """
+ import svn.client
+ # Give the client context baton a suite of authentication
+    # providers.
+ providers = [
+ svn.client.get_simple_provider(pool),
+ svn.client.get_username_provider(pool),
+ svn.client.get_ssl_client_cert_file_provider(pool),
+ svn.client.get_ssl_client_cert_pw_file_provider(pool),
+ svn.client.get_ssl_server_trust_file_provider(pool),
+ ]
+    # Platform-dependent authentication methods
+ getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
+ None)
+ if getprovider:
+ # Available in svn >= 1.6
+ for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
+ for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
+ p = getprovider(name, type, pool)
+ if p:
+ providers.append(p)
+ else:
+ if hasattr(svn.client, 'get_windows_simple_provider'):
+ providers.append(svn.client.get_windows_simple_provider(pool))
+
+ return svn.core.svn_auth_open(providers, pool)
+
+class NotBranchError(SubversionException):
+ pass
+
+class SvnRaTransport(object):
+ """
+ Open an ra connection to a Subversion repository.
+ """
+ def __init__(self, url="", ra=None):
+ self.pool = Pool()
+ self.svn_url = url
+ self.username = ''
+ self.password = ''
+
+ # Only Subversion 1.4 has reparent()
+ if ra is None or not hasattr(svn.ra, 'reparent'):
+ self.client = svn.client.create_context(self.pool)
+ ab = _create_auth_baton(self.pool)
+ if False:
+ svn.core.svn_auth_set_parameter(
+ ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
+ svn.core.svn_auth_set_parameter(
+ ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
+ self.client.auth_baton = ab
+ self.client.config = svn_config
+ try:
+ self.ra = svn.client.open_ra_session(
+ self.svn_url.encode('utf8'),
+ self.client, self.pool)
+ except SubversionException, (inst, num):
+ if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
+ svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
+ svn.core.SVN_ERR_BAD_URL):
+ raise NotBranchError(url)
+ raise
+ else:
+ self.ra = ra
+ svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
+
+ class Reporter(object):
+ def __init__(self, (reporter, report_baton)):
+ self._reporter = reporter
+ self._baton = report_baton
+
+ def set_path(self, path, revnum, start_empty, lock_token, pool=None):
+ svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
+ path, revnum, start_empty, lock_token, pool)
+
+ def delete_path(self, path, pool=None):
+ svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
+ path, pool)
+
+ def link_path(self, path, url, revision, start_empty, lock_token,
+ pool=None):
+ svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
+ path, url, revision, start_empty, lock_token,
+ pool)
+
+ def finish_report(self, pool=None):
+ svn.ra.reporter2_invoke_finish_report(self._reporter,
+ self._baton, pool)
+
+ def abort_report(self, pool=None):
+ svn.ra.reporter2_invoke_abort_report(self._reporter,
+ self._baton, pool)
+
+ def do_update(self, revnum, path, *args, **kwargs):
+ return self.Reporter(svn.ra.do_update(self.ra, revnum, path, *args, **kwargs))
diff --git a/sys/src/cmd/hg/hgext/extdiff.py b/sys/src/cmd/hg/hgext/extdiff.py
new file mode 100644
index 000000000..56e29f4df
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/extdiff.py
@@ -0,0 +1,228 @@
+# extdiff.py - external diff program support for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''command to allow external programs to compare revisions
+
+The extdiff Mercurial extension allows you to use external programs
+to compare revisions, or revision with working directory. The external
+diff programs are called with a configurable set of options and two
+non-option arguments: paths to directories containing snapshots of
+files to compare.
+
+The extdiff extension also allows you to configure new diff commands,
+so you do not always need to type "hg extdiff -p kdiff3". ::
+
+ [extdiff]
+ # add new command that runs GNU diff(1) in 'context diff' mode
+ cdiff = gdiff -Nprc5
+ ## or the old way:
+ #cmd.cdiff = gdiff
+ #opts.cdiff = -Nprc5
+
+ # add new command called vdiff, runs kdiff3
+ vdiff = kdiff3
+
+ # add new command called meld, runs meld (no need to name twice)
+ meld =
+
+ # add new command called vimdiff, runs gvimdiff with DirDiff plugin
+  # (see http://www.vim.org/scripts/script.php?script_id=102). Non-English
+  # users should put "let g:DirDiffDynamicDiffText = 1" in
+  # their .vimrc
+ vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
+
+You can use -I/-X and list of file or directory names like normal "hg
+diff" command. The extdiff extension makes snapshots of only needed
+files, so running the external diff program will actually be pretty
+fast (at least faster than having to compare the entire tree).
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short
+from mercurial import cmdutil, util, commands
+import os, shlex, shutil, tempfile
+
+def snapshot(ui, repo, files, node, tmproot):
+    '''snapshot files as of some revision.
+    If not using snapshot, -I/-X does not work and recursive diff
+    in tools like kdiff3 and meld displays too many files.'''
+ dirname = os.path.basename(repo.root)
+ if dirname == "":
+ dirname = "root"
+ if node is not None:
+ dirname = '%s.%s' % (dirname, short(node))
+ base = os.path.join(tmproot, dirname)
+ os.mkdir(base)
+ if node is not None:
+ ui.note(_('making snapshot of %d files from rev %s\n') %
+ (len(files), short(node)))
+ else:
+ ui.note(_('making snapshot of %d files from working directory\n') %
+ (len(files)))
+ wopener = util.opener(base)
+ fns_and_mtime = []
+ ctx = repo[node]
+ for fn in files:
+ wfn = util.pconvert(fn)
+        if wfn not in ctx:
+            # skipping new file after a merge?
+ continue
+ ui.note(' %s\n' % wfn)
+ dest = os.path.join(base, wfn)
+ fctx = ctx[wfn]
+ data = repo.wwritedata(wfn, fctx.data())
+ if 'l' in fctx.flags():
+ wopener.symlink(data, wfn)
+ else:
+ wopener(wfn, 'w').write(data)
+ if 'x' in fctx.flags():
+ util.set_flags(dest, False, True)
+ if node is None:
+ fns_and_mtime.append((dest, repo.wjoin(fn), os.path.getmtime(dest)))
+ return dirname, fns_and_mtime
+
+def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
+    '''Do the actual diff:
+
+ - copy to a temp structure if diffing 2 internal revisions
+ - copy to a temp structure if diffing working revision with
+ another one and more than 1 file is changed
+ - just invoke the diff for a single file in the working dir
+ '''
+
+ revs = opts.get('rev')
+ change = opts.get('change')
+
+ if revs and change:
+ msg = _('cannot specify --rev and --change at the same time')
+ raise util.Abort(msg)
+ elif change:
+ node2 = repo.lookup(change)
+ node1 = repo[node2].parents()[0].node()
+ else:
+ node1, node2 = cmdutil.revpair(repo, revs)
+
+ matcher = cmdutil.match(repo, pats, opts)
+ modified, added, removed = repo.status(node1, node2, matcher)[:3]
+ if not (modified or added or removed):
+ return 0
+
+ tmproot = tempfile.mkdtemp(prefix='extdiff.')
+ dir2root = ''
+ try:
+ # Always make a copy of node1
+ dir1 = snapshot(ui, repo, modified + removed, node1, tmproot)[0]
+ changes = len(modified) + len(removed) + len(added)
+
+        # If node2 is not the wc or there is >1 change, copy it
+ if node2 or changes > 1:
+ dir2, fns_and_mtime = snapshot(ui, repo, modified + added, node2, tmproot)
+ else:
+ # This lets the diff tool open the changed file directly
+ dir2 = ''
+ dir2root = repo.root
+ fns_and_mtime = []
+
+ # If only one change, diff the files instead of the directories
+        if changes == 1:
+ if len(modified):
+ dir1 = os.path.join(dir1, util.localpath(modified[0]))
+ dir2 = os.path.join(dir2root, dir2, util.localpath(modified[0]))
+            elif len(removed):
+ dir1 = os.path.join(dir1, util.localpath(removed[0]))
+ dir2 = os.devnull
+ else:
+ dir1 = os.devnull
+ dir2 = os.path.join(dir2root, dir2, util.localpath(added[0]))
+
+ cmdline = ('%s %s %s %s' %
+ (util.shellquote(diffcmd), ' '.join(diffopts),
+ util.shellquote(dir1), util.shellquote(dir2)))
+ ui.debug(_('running %r in %s\n') % (cmdline, tmproot))
+ util.system(cmdline, cwd=tmproot)
+
+ for copy_fn, working_fn, mtime in fns_and_mtime:
+ if os.path.getmtime(copy_fn) != mtime:
+ ui.debug(_('file changed while diffing. '
+ 'Overwriting: %s (src: %s)\n') % (working_fn, copy_fn))
+ util.copyfile(copy_fn, working_fn)
+
+ return 1
+ finally:
+ ui.note(_('cleaning up temp directory\n'))
+ shutil.rmtree(tmproot)
+
+def extdiff(ui, repo, *pats, **opts):
+ '''use external program to diff repository (or selected files)
+
+ Show differences between revisions for the specified files, using
+ an external program. The default program used is diff, with
+ default options "-Npru".
+
+ To select a different program, use the -p/--program option. The
+ program will be passed the names of two directories to compare. To
+ pass additional options to the program, use -o/--option. These
+ will be passed before the names of the directories to compare.
+
+ When two revision arguments are given, then changes are shown
+ between those revisions. If only one revision is specified then
+ that revision is compared to the working directory, and, when no
+ revisions are specified, the working directory files are compared
+ to its parent.'''
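+    # Illustrative invocations (not from the original source):
+    #   hg extdiff -r 2000 -r 2100        # diff two revisions with GNU diff
+    #   hg extdiff -p kdiff3 somefile.py  # compare one file using kdiff3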
+ program = opts['program'] or 'diff'
+ if opts['program']:
+ option = opts['option']
+ else:
+ option = opts['option'] or ['-Npru']
+ return dodiff(ui, repo, program, option, pats, opts)
+
+cmdtable = {
+ "extdiff":
+ (extdiff,
+ [('p', 'program', '', _('comparison program to run')),
+ ('o', 'option', [], _('pass option to comparison program')),
+ ('r', 'rev', [], _('revision')),
+ ('c', 'change', '', _('change made by revision')),
+ ] + commands.walkopts,
+ _('hg extdiff [OPT]... [FILE]...')),
+ }
+
+def uisetup(ui):
+ for cmd, path in ui.configitems('extdiff'):
+ if cmd.startswith('cmd.'):
+ cmd = cmd[4:]
+            if not path:
+                path = cmd
+ diffopts = ui.config('extdiff', 'opts.' + cmd, '')
+ diffopts = diffopts and [diffopts] or []
+ elif cmd.startswith('opts.'):
+ continue
+ else:
+ # command = path opts
+ if path:
+ diffopts = shlex.split(path)
+ path = diffopts.pop(0)
+ else:
+ path, diffopts = cmd, []
+ def save(cmd, path, diffopts):
+            '''use a closure to save the diff command to use'''
+ def mydiff(ui, repo, *pats, **opts):
+ return dodiff(ui, repo, path, diffopts, pats, opts)
+ mydiff.__doc__ = _('''\
+use %(path)s to diff repository (or selected files)
+
+ Show differences between revisions for the specified files, using the
+ %(path)s program.
+
+ When two revision arguments are given, then changes are shown between
+ those revisions. If only one revision is specified then that revision is
+ compared to the working directory, and, when no revisions are specified,
+ the working directory files are compared to its parent.\
+''') % dict(path=util.uirepr(path))
+ return mydiff
+ cmdtable[cmd] = (save(cmd, path, diffopts),
+ cmdtable['extdiff'][1][1:],
+ _('hg %s [OPTION]... [FILE]...') % cmd)
diff --git a/sys/src/cmd/hg/hgext/fetch.py b/sys/src/cmd/hg/hgext/fetch.py
new file mode 100644
index 000000000..05cd3fcc3
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/fetch.py
@@ -0,0 +1,148 @@
+# fetch.py - pull and merge remote changes
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''pull, update and merge in one command'''
+
+from mercurial.i18n import _
+from mercurial.node import nullid, short
+from mercurial import commands, cmdutil, hg, util, url, error
+from mercurial.lock import release
+
+def fetch(ui, repo, source='default', **opts):
+ '''pull changes from a remote repository, merge new changes if needed.
+
+ This finds all changes from the repository at the specified path
+ or URL and adds them to the local repository.
+
+ If the pulled changes add a new branch head, the head is
+ automatically merged, and the result of the merge is committed.
+ Otherwise, the working directory is updated to include the new
+ changes.
+
+ When a merge occurs, the newly pulled changes are assumed to be
+ "authoritative". The head of the new changes is used as the first
+ parent, with local changes as the second. To switch the merge
+ order, use --switch-parent.
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+ '''
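+    # Illustrative invocation (the URL is hypothetical):
+    #   hg fetch http://example.com/repo --switch-parent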
+
+ date = opts.get('date')
+ if date:
+ opts['date'] = util.parsedate(date)
+
+ parent, p2 = repo.dirstate.parents()
+ branch = repo.dirstate.branch()
+ branchnode = repo.branchtags().get(branch)
+ if parent != branchnode:
+ raise util.Abort(_('working dir not at branch tip '
+ '(use "hg update" to check out branch tip)'))
+
+ if p2 != nullid:
+ raise util.Abort(_('outstanding uncommitted merge'))
+
+ wlock = lock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ mod, add, rem, del_ = repo.status()[:4]
+
+ if mod or add or rem:
+ raise util.Abort(_('outstanding uncommitted changes'))
+ if del_:
+ raise util.Abort(_('working directory is missing some files'))
+ bheads = repo.branchheads(branch)
+ bheads = [head for head in bheads if len(repo[head].children()) == 0]
+ if len(bheads) > 1:
+ raise util.Abort(_('multiple heads in this branch '
+ '(use "hg heads ." and "hg merge" to merge)'))
+
+ other = hg.repository(cmdutil.remoteui(repo, opts),
+ ui.expandpath(source))
+ ui.status(_('pulling from %s\n') %
+ url.hidepassword(ui.expandpath(source)))
+ revs = None
+ if opts['rev']:
+ try:
+ revs = [other.lookup(rev) for rev in opts['rev']]
+ except error.CapabilityError:
+ err = _("Other repository doesn't support revision lookup, "
+ "so a rev cannot be specified.")
+ raise util.Abort(err)
+
+ # Are there any changes at all?
+ modheads = repo.pull(other, heads=revs)
+ if modheads == 0:
+ return 0
+
+ # Is this a simple fast-forward along the current branch?
+ newheads = repo.branchheads(branch)
+ newheads = [head for head in newheads if len(repo[head].children()) == 0]
+ newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
+ if len(newheads) == 1:
+ if newchildren[0] != parent:
+ return hg.clean(repo, newchildren[0])
+ else:
+ return
+
+        # Is there more than one additional branch head?
+ newchildren = [n for n in newchildren if n != parent]
+ newparent = parent
+ if newchildren:
+ newparent = newchildren[0]
+ hg.clean(repo, newparent)
+ newheads = [n for n in newheads if n != newparent]
+ if len(newheads) > 1:
+ ui.status(_('not merging with %d other new branch heads '
+ '(use "hg heads ." and "hg merge" to merge them)\n') %
+ (len(newheads) - 1))
+ return
+
+ # Otherwise, let's merge.
+ err = False
+ if newheads:
+ # By default, we consider the repository we're pulling
+ # *from* as authoritative, so we merge our changes into
+ # theirs.
+ if opts['switch_parent']:
+ firstparent, secondparent = newparent, newheads[0]
+ else:
+ firstparent, secondparent = newheads[0], newparent
+ ui.status(_('updating to %d:%s\n') %
+ (repo.changelog.rev(firstparent),
+ short(firstparent)))
+ hg.clean(repo, firstparent)
+ ui.status(_('merging with %d:%s\n') %
+ (repo.changelog.rev(secondparent), short(secondparent)))
+ err = hg.merge(repo, secondparent, remind=False)
+
+ if not err:
+ # we don't translate commit messages
+ message = (cmdutil.logmessage(opts) or
+ ('Automated merge with %s' %
+ url.removeauth(other.url())))
+ editor = cmdutil.commiteditor
+ if opts.get('force_editor') or opts.get('edit'):
+ editor = cmdutil.commitforceeditor
+ n = repo.commit(message, opts['user'], opts['date'], editor=editor)
+ ui.status(_('new changeset %d:%s merges remote changes '
+ 'with local\n') % (repo.changelog.rev(n),
+ short(n)))
+
+ finally:
+ release(lock, wlock)
+
+cmdtable = {
+ 'fetch':
+ (fetch,
+ [('r', 'rev', [], _('a specific revision you would like to pull')),
+ ('e', 'edit', None, _('edit commit message')),
+ ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
+ ('', 'switch-parent', None, _('switch parents when merging')),
+ ] + commands.commitopts + commands.commitopts2 + commands.remoteopts,
+ _('hg fetch [SOURCE]')),
+}
diff --git a/sys/src/cmd/hg/hgext/gpg.py b/sys/src/cmd/hg/hgext/gpg.py
new file mode 100644
index 000000000..4a2f07d8e
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/gpg.py
@@ -0,0 +1,284 @@
+# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''commands to sign and verify changesets'''
+
+import os, tempfile, binascii
+from mercurial import util, commands, match
+from mercurial import node as hgnode
+from mercurial.i18n import _
+
+class gpg(object):
+ def __init__(self, path, key=None):
+ self.path = path
+ self.key = (key and " --local-user \"%s\"" % key) or ""
+
+ def sign(self, data):
+ gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
+ return util.filter(data, gpgcmd)
+
+ def verify(self, data, sig):
+ """ returns of the good and bad signatures"""
+ sigfile = datafile = None
+ try:
+ # create temporary files
+ fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
+ fp = os.fdopen(fd, 'wb')
+ fp.write(sig)
+ fp.close()
+ fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
+ fp = os.fdopen(fd, 'wb')
+ fp.write(data)
+ fp.close()
+ gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
+ "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
+ ret = util.filter("", gpgcmd)
+ finally:
+ for f in (sigfile, datafile):
+ try:
+ if f: os.unlink(f)
+ except: pass
+ keys = []
+ key, fingerprint = None, None
+ err = ""
+ for l in ret.splitlines():
+ # see DETAILS in the gnupg documentation
+ # filter the logger output
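+ # a typical status line looks like (illustrative):
+ # [GNUPG:] GOODSIG <long key id> <user id>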
+ if not l.startswith("[GNUPG:]"):
+ continue
+ l = l[9:]
+ if l.startswith("ERRSIG"):
+ err = _("error while verifying signature")
+ break
+ elif l.startswith("VALIDSIG"):
+ # fingerprint of the primary key
+ fingerprint = l.split()[10]
+ elif (l.startswith("GOODSIG") or
+ l.startswith("EXPSIG") or
+ l.startswith("EXPKEYSIG") or
+ l.startswith("BADSIG")):
+ if key is not None:
+ keys.append(key + [fingerprint])
+ key = l.split(" ", 2)
+ fingerprint = None
+ if err:
+ return err, []
+ if key is not None:
+ keys.append(key + [fingerprint])
+ return err, keys
+
+def newgpg(ui, **opts):
+ """create a new gpg instance"""
+ gpgpath = ui.config("gpg", "cmd", "gpg")
+ gpgkey = opts.get('key')
+ if not gpgkey:
+ gpgkey = ui.config("gpg", "key", None)
+ return gpg(gpgpath, gpgkey)
+
+def sigwalk(repo):
+ """
+ walk over every signature, yielding a tuple
+ ((node, version, sig), (filename, linenumber))
+ """
+ def parsefile(fileiter, context):
+ ln = 1
+ for l in fileiter:
+ if not l:
+ continue
+ yield (l.split(" ", 2), (context, ln))
+ ln += 1
+
+ # read the heads
+ fl = repo.file(".hgsigs")
+ for r in reversed(fl.heads()):
+ fn = ".hgsigs|%s" % hgnode.short(r)
+ for item in parsefile(fl.read(r).splitlines(), fn):
+ yield item
+ try:
+ # read local signatures
+ fn = "localsigs"
+ for item in parsefile(repo.opener(fn), fn):
+ yield item
+ except IOError:
+ pass
+
+def getkeys(ui, repo, mygpg, sigdata, context):
+ """get the keys who signed a data"""
+ fn, ln = context
+ node, version, sig = sigdata
+ prefix = "%s:%d" % (fn, ln)
+ node = hgnode.bin(node)
+
+ data = node2txt(repo, node, version)
+ sig = binascii.a2b_base64(sig)
+ err, keys = mygpg.verify(data, sig)
+ if err:
+ ui.warn("%s:%d %s\n" % (fn, ln , err))
+ return None
+
+ validkeys = []
+ # warn for expired key and/or sigs
+ for key in keys:
+ if key[0] == "BADSIG":
+ ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
+ continue
+ if key[0] == "EXPSIG":
+ ui.write(_("%s Note: Signature has expired"
+ " (signed by: \"%s\")\n") % (prefix, key[2]))
+ elif key[0] == "EXPKEYSIG":
+ ui.write(_("%s Note: This key has expired"
+ " (signed by: \"%s\")\n") % (prefix, key[2]))
+ validkeys.append((key[1], key[2], key[3]))
+ return validkeys
+
+def sigs(ui, repo):
+ """list signed changesets"""
+ mygpg = newgpg(ui)
+ revs = {}
+
+ for data, context in sigwalk(repo):
+ node, version, sig = data
+ fn, ln = context
+ try:
+ n = repo.lookup(node)
+ except KeyError:
+ ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
+ continue
+ r = repo.changelog.rev(n)
+ keys = getkeys(ui, repo, mygpg, data, context)
+ if not keys:
+ continue
+ revs.setdefault(r, [])
+ revs[r].extend(keys)
+ for rev in sorted(revs, reverse=True):
+ for k in revs[rev]:
+ r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
+ ui.write("%-30s %s\n" % (keystr(ui, k), r))
+
+def check(ui, repo, rev):
+ """verify all the signatures there may be for a particular revision"""
+ mygpg = newgpg(ui)
+ rev = repo.lookup(rev)
+ hexrev = hgnode.hex(rev)
+ keys = []
+
+ for data, context in sigwalk(repo):
+ node, version, sig = data
+ if node == hexrev:
+ k = getkeys(ui, repo, mygpg, data, context)
+ if k:
+ keys.extend(k)
+
+ if not keys:
+ ui.write(_("No valid signature for %s\n") % hgnode.short(rev))
+ return
+
+ # print summary
+ ui.write("%s is signed by:\n" % hgnode.short(rev))
+ for key in keys:
+ ui.write(" %s\n" % keystr(ui, key))
+
+def keystr(ui, key):
+ """associate a string to a key (username, comment)"""
+ keyid, user, fingerprint = key
+ comment = ui.config("gpg", fingerprint, None)
+ if comment:
+ return "%s (%s)" % (user, comment)
+ else:
+ return user
+
+def sign(ui, repo, *revs, **opts):
+ """add a signature for the current or given revision
+
+ If no revision is given, the parent of the working directory is used,
+ or tip if no revision is checked out.
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+ """
+
+ mygpg = newgpg(ui, **opts)
+ sigver = "0"
+ sigmessage = ""
+
+ date = opts.get('date')
+ if date:
+ opts['date'] = util.parsedate(date)
+
+ if revs:
+ nodes = [repo.lookup(n) for n in revs]
+ else:
+ nodes = [node for node in repo.dirstate.parents()
+ if node != hgnode.nullid]
+ if len(nodes) > 1:
+ raise util.Abort(_('uncommitted merge - please provide a '
+ 'specific revision'))
+ if not nodes:
+ nodes = [repo.changelog.tip()]
+
+ for n in nodes:
+ hexnode = hgnode.hex(n)
+ ui.write("Signing %d:%s\n" % (repo.changelog.rev(n),
+ hgnode.short(n)))
+ # build data
+ data = node2txt(repo, n, sigver)
+ sig = mygpg.sign(data)
+ if not sig:
+ raise util.Abort(_("Error while signing"))
+ sig = binascii.b2a_base64(sig)
+ sig = sig.replace("\n", "")
+ sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
+
+ # write it
+ if opts['local']:
+ repo.opener("localsigs", "ab").write(sigmessage)
+ return
+
+ for x in repo.status(unknown=True)[:5]:
+ if ".hgsigs" in x and not opts["force"]:
+ raise util.Abort(_("working copy of .hgsigs is changed "
+ "(please commit .hgsigs manually "
+ "or use --force)"))
+
+ repo.wfile(".hgsigs", "ab").write(sigmessage)
+
+ if '.hgsigs' not in repo.dirstate:
+ repo.add([".hgsigs"])
+
+ if opts["no_commit"]:
+ return
+
+ message = opts['message']
+ if not message:
+ # we don't translate commit messages
+ message = "\n".join(["Added signature for changeset %s"
+ % hgnode.short(n)
+ for n in nodes])
+ try:
+ m = match.exact(repo.root, '', ['.hgsigs'])
+ repo.commit(message, opts['user'], opts['date'], match=m)
+ except ValueError, inst:
+ raise util.Abort(str(inst))
+
+def node2txt(repo, node, ver):
+ """map a manifest into some text"""
+ if ver == "0":
+ return "%s\n" % hgnode.hex(node)
+ else:
+ raise util.Abort(_("unknown signature version"))
+
+cmdtable = {
+ "sign":
+ (sign,
+ [('l', 'local', None, _('make the signature local')),
+ ('f', 'force', None, _('sign even if the sigfile is modified')),
+ ('', 'no-commit', None, _('do not commit the sigfile after signing')),
+ ('k', 'key', '', _('the key id to sign with')),
+ ('m', 'message', '', _('commit message')),
+ ] + commands.commitopts2,
+ _('hg sign [OPTION]... [REVISION]...')),
+ "sigcheck": (check, [], _('hg sigcheck REVISION')),
+ "sigs": (sigs, [], _('hg sigs')),
+}
+
diff --git a/sys/src/cmd/hg/hgext/graphlog.py b/sys/src/cmd/hg/hgext/graphlog.py
new file mode 100644
index 000000000..d77edf931
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/graphlog.py
@@ -0,0 +1,378 @@
+# ASCII graph log extension for Mercurial
+#
+# Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''command to view revision graphs from a shell
+
+This extension adds a --graph option to the incoming, outgoing and log
+commands. When this options is given, an ASCII representation of the
+revision graph is also shown.
+'''
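+#
+# enabled via an hgrc snippet like (illustrative):
+#
+#   [extensions]
+#   graphlog =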
+
+import os, sys
+from mercurial.cmdutil import revrange, show_changeset
+from mercurial.commands import templateopts
+from mercurial.i18n import _
+from mercurial.node import nullrev
+from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions
+from mercurial import hg, url, util, graphmod
+
+ASCIIDATA = 'ASC'
+
+def asciiformat(ui, repo, revdag, opts, parentrepo=None):
+ """formats a changelog DAG walk for ASCII output"""
+ if parentrepo is None:
+ parentrepo = repo
+ showparents = [ctx.node() for ctx in parentrepo[None].parents()]
+ displayer = show_changeset(ui, repo, opts, buffered=True)
+ for (id, type, ctx, parentids) in revdag:
+ if type != graphmod.CHANGESET:
+ continue
+ displayer.show(ctx)
+ lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1]
+ char = ctx.node() in showparents and '@' or 'o'
+ yield (id, ASCIIDATA, (char, lines), parentids)
+
+def asciiedges(nodes):
+ """adds edge info to changelog DAG walk suitable for ascii()"""
+ seen = []
+ for node, type, data, parents in nodes:
+ if node not in seen:
+ seen.append(node)
+ nodeidx = seen.index(node)
+
+ knownparents = []
+ newparents = []
+ for parent in parents:
+ if parent in seen:
+ knownparents.append(parent)
+ else:
+ newparents.append(parent)
+
+ ncols = len(seen)
+ nextseen = seen[:]
+ nextseen[nodeidx:nodeidx + 1] = newparents
+ edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
+
+ if len(newparents) > 0:
+ edges.append((nodeidx, nodeidx))
+ if len(newparents) > 1:
+ edges.append((nodeidx, nodeidx + 1))
+ nmorecols = len(nextseen) - ncols
+ seen = nextseen
+ yield (nodeidx, type, data, edges, ncols, nmorecols)
+
+def fix_long_right_edges(edges):
+ for (i, (start, end)) in enumerate(edges):
+ if end > start:
+ edges[i] = (start, end + 1)
+
+def get_nodeline_edges_tail(
+ node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
+ if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
+ # Still going in the same non-vertical direction.
+ if n_columns_diff == -1:
+ start = max(node_index + 1, p_node_index)
+ tail = ["|", " "] * (start - node_index - 1)
+ tail.extend(["/", " "] * (n_columns - start))
+ return tail
+ else:
+ return ["\\", " "] * (n_columns - node_index - 1)
+ else:
+ return ["|", " "] * (n_columns - node_index - 1)
+
+def draw_edges(edges, nodeline, interline):
+ for (start, end) in edges:
+ if start == end + 1:
+ interline[2 * end + 1] = "/"
+ elif start == end - 1:
+ interline[2 * start + 1] = "\\"
+ elif start == end:
+ interline[2 * start] = "|"
+ else:
+ nodeline[2 * end] = "+"
+ if start > end:
+ (start, end) = (end, start)
+ for i in range(2 * start + 1, 2 * end):
+ if nodeline[i] != "+":
+ nodeline[i] = "-"
+
+def get_padding_line(ni, n_columns, edges):
+ line = []
+ line.extend(["|", " "] * ni)
+ if (ni, ni - 1) in edges or (ni, ni) in edges:
+ # (ni, ni - 1) (ni, ni)
+ # | | | | | | | |
+ # +---o | | o---+
+ # | | c | | c | |
+ # | |/ / | |/ /
+ # | | | | | |
+ c = "|"
+ else:
+ c = " "
+ line.extend([c, " "])
+ line.extend(["|", " "] * (n_columns - ni - 1))
+ return line
+
+def ascii(ui, dag):
+ """prints an ASCII graph of the DAG
+
+ dag is a generator that emits tuples with the following elements:
+
+ - Column of the current node in the set of ongoing edges.
+ - Type indicator of node data == ASCIIDATA.
+ - Payload: (char, lines):
+ - Character to use as node's symbol.
+ - List of lines to display as the node's text.
+ - Edges; a list of (col, next_col) indicating the edges between
+ the current node and its parents.
+ - Number of columns (ongoing edges) in the current revision.
+ - The difference between the number of columns (ongoing edges)
+ in the next revision and the number of columns (ongoing edges)
+ in the current revision. That is: -1 means one column removed;
+ 0 means no columns added or removed; 1 means one column added.
+ """
+ prev_n_columns_diff = 0
+ prev_node_index = 0
+ for (node_index, type, (node_ch, node_lines), edges, n_columns, n_columns_diff) in dag:
+
+ assert -2 < n_columns_diff < 2
+ if n_columns_diff == -1:
+ # Transform
+ #
+ # | | | | | |
+ # o | | into o---+
+ # |X / |/ /
+ # | | | |
+ fix_long_right_edges(edges)
+
+ # add_padding_line says whether to rewrite
+ #
+ # | | | | | | | |
+ # | o---+ into | o---+
+ # | / / | | | # <--- padding line
+ # o | | | / /
+ # o | |
+ add_padding_line = (len(node_lines) > 2 and
+ n_columns_diff == -1 and
+ [x for (x, y) in edges if x + 1 < y])
+
+ # fix_nodeline_tail says whether to rewrite
+ #
+ # | | o | | | | o | |
+ # | | |/ / | | |/ /
+ # | o | | into | o / / # <--- fixed nodeline tail
+ # | |/ / | |/ /
+ # o | | o | |
+ fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line
+
+ # nodeline is the line containing the node character (typically o)
+ nodeline = ["|", " "] * node_index
+ nodeline.extend([node_ch, " "])
+
+ nodeline.extend(
+ get_nodeline_edges_tail(
+ node_index, prev_node_index, n_columns, n_columns_diff,
+ prev_n_columns_diff, fix_nodeline_tail))
+
+ # shift_interline is the line containing the non-vertical
+ # edges between this entry and the next
+ shift_interline = ["|", " "] * node_index
+ if n_columns_diff == -1:
+ n_spaces = 1
+ edge_ch = "/"
+ elif n_columns_diff == 0:
+ n_spaces = 2
+ edge_ch = "|"
+ else:
+ n_spaces = 3
+ edge_ch = "\\"
+ shift_interline.extend(n_spaces * [" "])
+ shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1))
+
+ # draw edges from the current node to its parents
+ draw_edges(edges, nodeline, shift_interline)
+
+ # lines is the list of all graph lines to print
+ lines = [nodeline]
+ if add_padding_line:
+ lines.append(get_padding_line(node_index, n_columns, edges))
+ lines.append(shift_interline)
+
+ # make sure that there are as many graph lines as there are
+ # log strings
+ while len(node_lines) < len(lines):
+ node_lines.append("")
+ if len(lines) < len(node_lines):
+ extra_interline = ["|", " "] * (n_columns + n_columns_diff)
+ while len(lines) < len(node_lines):
+ lines.append(extra_interline)
+
+ # print lines
+ indentation_level = max(n_columns, n_columns + n_columns_diff)
+ for (line, logstr) in zip(lines, node_lines):
+ ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
+ ui.write(ln.rstrip() + '\n')
+
+ # ... and start over
+ prev_node_index = node_index
+ prev_n_columns_diff = n_columns_diff
+
+def get_revs(repo, rev_opt):
+ if rev_opt:
+ revs = revrange(repo, rev_opt)
+ return (max(revs), min(revs))
+ else:
+ return (len(repo) - 1, 0)
+
+def check_unsupported_flags(opts):
+ for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
+ "only_merges", "user", "only_branch", "prune", "newest_first",
+ "no_merges", "include", "exclude"]:
+ if op in opts and opts[op]:
+ raise util.Abort(_("--graph option is incompatible with --%s") % op)
+
+def graphlog(ui, repo, path=None, **opts):
+ """show revision history alongside an ASCII revision graph
+
+ Print a revision history alongside a revision graph drawn with
+ ASCII characters.
+
+ Nodes printed as an @ character are parents of the working
+ directory.
+ """
+
+ check_unsupported_flags(opts)
+ limit = cmdutil.loglimit(opts)
+ start, stop = get_revs(repo, opts["rev"])
+ stop = max(stop, start - limit + 1)
+ if start == nullrev:
+ return
+
+ if path:
+ path = util.canonpath(repo.root, os.getcwd(), path)
+ if path: # could be reset in canonpath
+ revdag = graphmod.filerevs(repo, path, start, stop)
+ else:
+ revdag = graphmod.revisions(repo, start, stop)
+
+ fmtdag = asciiformat(ui, repo, revdag, opts)
+ ascii(ui, asciiedges(fmtdag))
+
+def graphrevs(repo, nodes, opts):
+ limit = cmdutil.loglimit(opts)
+ nodes.reverse()
+ if limit < sys.maxint:
+ nodes = nodes[:limit]
+ return graphmod.nodes(repo, nodes)
+
+def goutgoing(ui, repo, dest=None, **opts):
+ """show the outgoing changesets alongside an ASCII revision graph
+
+ Print the outgoing changesets alongside a revision graph drawn with
+ ASCII characters.
+
+ Nodes printed as an @ character are parents of the working
+ directory.
+ """
+
+ check_unsupported_flags(opts)
+ dest, revs, checkout = hg.parseurl(
+ ui.expandpath(dest or 'default-push', dest or 'default'),
+ opts.get('rev'))
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+ other = hg.repository(cmdutil.remoteui(ui, opts), dest)
+ ui.status(_('comparing with %s\n') % url.hidepassword(dest))
+ o = repo.findoutgoing(other, force=opts.get('force'))
+ if not o:
+ ui.status(_("no changes found\n"))
+ return
+
+ o = repo.changelog.nodesbetween(o, revs)[0]
+ revdag = graphrevs(repo, o, opts)
+ fmtdag = asciiformat(ui, repo, revdag, opts)
+ ascii(ui, asciiedges(fmtdag))
+
+def gincoming(ui, repo, source="default", **opts):
+ """show the incoming changesets alongside an ASCII revision graph
+
+ Print the incoming changesets alongside a revision graph drawn with
+ ASCII characters.
+
+ Nodes printed as an @ character are parents of the working
+ directory.
+ """
+
+ check_unsupported_flags(opts)
+ source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
+ other = hg.repository(cmdutil.remoteui(repo, opts), source)
+ ui.status(_('comparing with %s\n') % url.hidepassword(source))
+ if revs:
+ revs = [other.lookup(rev) for rev in revs]
+ incoming = repo.findincoming(other, heads=revs, force=opts["force"])
+ if not incoming:
+ try:
+ os.unlink(opts["bundle"])
+ except:
+ pass
+ ui.status(_("no changes found\n"))
+ return
+
+ cleanup = None
+ try:
+
+ fname = opts["bundle"]
+ if fname or not other.local():
+ # create a bundle (uncompressed if other repo is not local)
+ if revs is None:
+ cg = other.changegroup(incoming, "incoming")
+ else:
+ cg = other.changegroupsubset(incoming, revs, 'incoming')
+ bundletype = other.local() and "HG10BZ" or "HG10UN"
+ fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
+ # keep written bundle?
+ if opts["bundle"]:
+ cleanup = None
+ if not other.local():
+ # use the created uncompressed bundlerepo
+ other = bundlerepo.bundlerepository(ui, repo.root, fname)
+
+ chlist = other.changelog.nodesbetween(incoming, revs)[0]
+ revdag = graphrevs(other, chlist, opts)
+ fmtdag = asciiformat(ui, other, revdag, opts, parentrepo=repo)
+ ascii(ui, asciiedges(fmtdag))
+
+ finally:
+ if hasattr(other, 'close'):
+ other.close()
+ if cleanup:
+ os.unlink(cleanup)
+
+def uisetup(ui):
+ '''Initialize the extension.'''
+ _wrapcmd(ui, 'log', commands.table, graphlog)
+ _wrapcmd(ui, 'incoming', commands.table, gincoming)
+ _wrapcmd(ui, 'outgoing', commands.table, goutgoing)
+
+def _wrapcmd(ui, cmd, table, wrapfn):
+ '''wrap the command'''
+ def graph(orig, *args, **kwargs):
+ if kwargs['graph']:
+ return wrapfn(*args, **kwargs)
+ return orig(*args, **kwargs)
+ entry = extensions.wrapcommand(table, cmd, graph)
+ entry[1].append(('G', 'graph', None, _("show the revision DAG")))
+
+cmdtable = {
+ "glog":
+ (graphlog,
+ [('l', 'limit', '', _('limit number of changes displayed')),
+ ('p', 'patch', False, _('show patch')),
+ ('r', 'rev', [], _('show the specified revision or range')),
+ ] + templateopts,
+ _('hg glog [OPTION]... [FILE]')),
+}
diff --git a/sys/src/cmd/hg/hgext/hgcia.py b/sys/src/cmd/hg/hgext/hgcia.py
new file mode 100644
index 000000000..dfae38919
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/hgcia.py
@@ -0,0 +1,246 @@
+# Copyright (C) 2007-8 Brendan Cully <brendan@kublai.com>
+# Published under the GNU GPL
+
+"""hooks for integrating with the CIA.vc notification service
+
+This is meant to be run as a changegroup or incoming hook. To
+configure it, set the following options in your hgrc::
+
+ [cia]
+ # your registered CIA user name
+ user = foo
+ # the name of the project in CIA
+ project = foo
+ # the module (subproject) (optional)
+ #module = foo
+ # Append a diffstat to the log message (optional)
+ #diffstat = False
+ # Template to use for log messages (optional)
+ #template = {desc}\\n{baseurl}/rev/{node}-- {diffstat}
+ # Style to use (optional)
+ #style = foo
+ # The URL of the CIA notification service (optional)
+ # You can use mailto: URLs to send by email, e.g.
+ # mailto:cia@cia.vc
+ # Make sure to set email.from if you do this.
+ #url = http://cia.vc/
+ # print message instead of sending it (optional)
+ #test = False
+
+ [hooks]
+ # one of these:
+ changegroup.cia = python:hgcia.hook
+ #incoming.cia = python:hgcia.hook
+
+ [web]
+ # If you want hyperlinks (optional)
+ baseurl = http://server/path/to/repo
+"""
+
+from mercurial.i18n import _
+from mercurial.node import *
+from mercurial import cmdutil, patch, templater, util, mail
+import email.Parser
+
+import xmlrpclib
+from xml.sax import saxutils
+
+socket_timeout = 30 # seconds
+try:
+ # set a timeout for the socket so you don't have to wait so looooong
+ # when cia.vc is having problems. requires python >= 2.3:
+ import socket
+ socket.setdefaulttimeout(socket_timeout)
+except:
+ pass
+
+HGCIA_VERSION = '0.1'
+HGCIA_URL = 'http://hg.kublai.com/mercurial/hgcia'
+
+
+class ciamsg(object):
+ """ A CIA message """
+ def __init__(self, cia, ctx):
+ self.cia = cia
+ self.ctx = ctx
+ self.url = self.cia.url
+
+ def fileelem(self, path, uri, action):
+ if uri:
+ uri = ' uri=%s' % saxutils.quoteattr(uri)
+ return '<file%s action=%s>%s</file>' % (
+ uri, saxutils.quoteattr(action), saxutils.escape(path))
+
+ def fileelems(self):
+ n = self.ctx.node()
+ f = self.cia.repo.status(self.ctx.parents()[0].node(), n)
+ url = self.url or ''
+ elems = []
+ for path in f[0]:
+ uri = '%s/diff/%s/%s' % (url, short(n), path)
+ elems.append(self.fileelem(path, url and uri, 'modify'))
+ for path in f[1]:
+ # TODO: copy/rename ?
+ uri = '%s/file/%s/%s' % (url, short(n), path)
+ elems.append(self.fileelem(path, url and uri, 'add'))
+ for path in f[2]:
+ elems.append(self.fileelem(path, '', 'remove'))
+
+ return '\n'.join(elems)
+
+ def sourceelem(self, project, module=None, branch=None):
+ msg = ['<source>', '<project>%s</project>' % saxutils.escape(project)]
+ if module:
+ msg.append('<module>%s</module>' % saxutils.escape(module))
+ if branch:
+ msg.append('<branch>%s</branch>' % saxutils.escape(branch))
+ msg.append('</source>')
+
+ return '\n'.join(msg)
+
+ def diffstat(self):
+ class patchbuf(object):
+ def __init__(self):
+ self.lines = []
+ # diffstat is stupid
+ self.name = 'cia'
+ def write(self, data):
+ self.lines.append(data)
+ def close(self):
+ pass
+
+ n = self.ctx.node()
+ pbuf = patchbuf()
+ patch.export(self.cia.repo, [n], fp=pbuf)
+ return patch.diffstat(pbuf.lines) or ''
+
+ def logmsg(self):
+ diffstat = self.cia.diffstat and self.diffstat() or ''
+ self.cia.ui.pushbuffer()
+ self.cia.templater.show(self.ctx, changes=self.ctx.changeset(),
+ url=self.cia.url, diffstat=diffstat)
+ return self.cia.ui.popbuffer()
+
+ def xml(self):
+ n = short(self.ctx.node())
+ src = self.sourceelem(self.cia.project, module=self.cia.module,
+ branch=self.ctx.branch())
+ # unix timestamp
+ dt = self.ctx.date()
+ timestamp = dt[0]
+
+ author = saxutils.escape(self.ctx.user())
+ rev = '%d:%s' % (self.ctx.rev(), n)
+ log = saxutils.escape(self.logmsg())
+
+ url = self.url and '<url>%s/rev/%s</url>' % (saxutils.escape(self.url),
+ n) or ''
+
+ msg = """
+<message>
+ <generator>
+ <name>Mercurial (hgcia)</name>
+ <version>%s</version>
+ <url>%s</url>
+ <user>%s</user>
+ </generator>
+ %s
+ <body>
+ <commit>
+ <author>%s</author>
+ <version>%s</version>
+ <log>%s</log>
+ %s
+ <files>%s</files>
+ </commit>
+ </body>
+ <timestamp>%d</timestamp>
+</message>
+""" % \
+ (HGCIA_VERSION, saxutils.escape(HGCIA_URL),
+ saxutils.escape(self.cia.user), src, author, rev, log, url,
+ self.fileelems(), timestamp)
+
+ return msg
+
+
+class hgcia(object):
+ """ CIA notification class """
+
+ deftemplate = '{desc}'
+ dstemplate = '{desc}\n-- \n{diffstat}'
+
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.repo = repo
+
+ self.ciaurl = self.ui.config('cia', 'url', 'http://cia.vc')
+ self.user = self.ui.config('cia', 'user')
+ self.project = self.ui.config('cia', 'project')
+ self.module = self.ui.config('cia', 'module')
+ self.diffstat = self.ui.configbool('cia', 'diffstat')
+ self.emailfrom = self.ui.config('email', 'from')
+ self.dryrun = self.ui.configbool('cia', 'test')
+ self.url = self.ui.config('web', 'baseurl')
+
+ style = self.ui.config('cia', 'style')
+ template = self.ui.config('cia', 'template')
+ if not template:
+ template = self.diffstat and self.dstemplate or self.deftemplate
+ template = templater.parsestring(template, quoted=False)
+ t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
+ style, False)
+ t.use_template(template)
+ self.templater = t
+
+ def sendrpc(self, msg):
+ srv = xmlrpclib.Server(self.ciaurl)
+ srv.hub.deliver(msg)
+
+ def sendemail(self, address, data):
+ p = email.Parser.Parser()
+ msg = p.parsestr(data)
+ msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
+ msg['To'] = address
+ msg['From'] = self.emailfrom
+ msg['Subject'] = 'DeliverXML'
+ msg['Content-type'] = 'text/xml'
+ msgtext = msg.as_string()
+
+ self.ui.status(_('hgcia: sending update to %s\n') % address)
+ mail.sendmail(self.ui, util.email(self.emailfrom),
+ [address], msgtext)
+
+
+def hook(ui, repo, hooktype, node=None, url=None, **kwargs):
+ """ send CIA notification """
+ def sendmsg(cia, ctx):
+ msg = ciamsg(cia, ctx).xml()
+ if cia.dryrun:
+ ui.write(msg)
+ elif cia.ciaurl.startswith('mailto:'):
+ if not cia.emailfrom:
+ raise util.Abort(_('email.from must be defined when '
+ 'sending by email'))
+ cia.sendemail(cia.ciaurl[7:], msg)
+ else:
+ cia.sendrpc(msg)
+
+ n = bin(node)
+ cia = hgcia(ui, repo)
+ if not cia.user:
+ ui.debug(_('cia: no user specified'))
+ return
+ if not cia.project:
+ ui.debug(_('cia: no project specified'))
+ return
+ if hooktype == 'changegroup':
+ start = repo.changelog.rev(n)
+ end = len(repo.changelog)
+ for rev in xrange(start, end):
+ n = repo.changelog.node(rev)
+ ctx = repo.changectx(n)
+ sendmsg(cia, ctx)
+ else:
+ ctx = repo.changectx(n)
+ sendmsg(cia, ctx)
diff --git a/sys/src/cmd/hg/hgext/hgk.py b/sys/src/cmd/hg/hgext/hgk.py
new file mode 100644
index 000000000..03441ce00
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/hgk.py
@@ -0,0 +1,347 @@
+# Minimal support for git commands on an hg repository
+#
+# Copyright 2005, 2006 Chris Mason <mason@suse.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''browse the repository in a graphical way
+
+The hgk extension allows browsing the history of a repository in a
+graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
+distributed with Mercurial.)
+
+hgk consists of two parts: a Tcl script that does the displaying and
+querying of information, and an extension to Mercurial named hgk.py,
+which provides hooks for hgk to get information. hgk can be found in
+the contrib directory, and the extension is shipped in the hgext
+repository, and needs to be enabled.
+
+The hg view command will launch the hgk Tcl script. For this command
+to work, hgk must be in your search path. Alternately, you can specify
+the path to hgk in your .hgrc file::
+
+ [hgk]
+ path=/location/of/hgk
+
+hgk can make use of the extdiff extension to visualize revisions.
+Assuming you have already configured the extdiff vdiff command, just add::
+
+ [hgk]
+ vdiff=vdiff
+
+The revision context menu will now display additional entries that fire
+vdiff on the hovered and selected revisions.
+'''
+
+import os
+from mercurial import commands, util, patch, revlog, cmdutil
+from mercurial.node import nullid, nullrev, short
+from mercurial.i18n import _
+
+def difftree(ui, repo, node1=None, node2=None, *files, **opts):
+ """diff trees from two commits"""
+ def __difftree(repo, node1, node2, files=[]):
+ assert node2 is not None
+ mmap = repo[node1].manifest()
+ mmap2 = repo[node2].manifest()
+ m = cmdutil.match(repo, files)
+ modified, added, removed = repo.status(node1, node2, m)[:3]
+ empty = short(nullid)
+
+ for f in modified:
+ # TODO get file permissions
+ ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
+ (short(mmap[f]), short(mmap2[f]), f, f))
+ for f in added:
+ ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
+ (empty, short(mmap2[f]), f, f))
+ for f in removed:
+ ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
+ (short(mmap[f]), empty, f, f))
+ ##
+
+ while True:
+ if opts['stdin']:
+ try:
+ line = raw_input().split(' ')
+ node1 = line[0]
+ if len(line) > 1:
+ node2 = line[1]
+ else:
+ node2 = None
+ except EOFError:
+ break
+ node1 = repo.lookup(node1)
+ if node2:
+ node2 = repo.lookup(node2)
+ else:
+ node2 = node1
+ node1 = repo.changelog.parents(node1)[0]
+ if opts['patch']:
+ if opts['pretty']:
+ catcommit(ui, repo, node2, "")
+ m = cmdutil.match(repo, files)
+ chunks = patch.diff(repo, node1, node2, match=m,
+ opts=patch.diffopts(ui, {'git': True}))
+ for chunk in chunks:
+ ui.write(chunk)
+ else:
+ __difftree(repo, node1, node2, files=files)
+ if not opts['stdin']:
+ break
+
+def catcommit(ui, repo, n, prefix, ctx=None):
+ nlprefix = '\n' + prefix
+ if ctx is None:
+ ctx = repo[n]
+ ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
+ for p in ctx.parents():
+ ui.write("parent %s\n" % p)
+
+ date = ctx.date()
+ description = ctx.description().replace("\0", "")
+ lines = description.splitlines()
+ if lines and lines[-1].startswith('committer:'):
+ committer = lines[-1].split(': ')[1].rstrip()
+ else:
+ committer = ctx.user()
+
+ ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
+ ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
+ ui.write("revision %d\n" % ctx.rev())
+ ui.write("branch %s\n\n" % ctx.branch())
+
+ if prefix != "":
+ ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
+ else:
+ ui.write(description + "\n")
+ if prefix:
+ ui.write('\0')
+
+def base(ui, repo, node1, node2):
+ """output common ancestor information"""
+ node1 = repo.lookup(node1)
+ node2 = repo.lookup(node2)
+ n = repo.changelog.ancestor(node1, node2)
+ ui.write(short(n) + "\n")
+
+def catfile(ui, repo, type=None, r=None, **opts):
+ """cat a specific revision"""
+ # in stdin mode, every line except the commit is prefixed with two
+ # spaces. This way our caller can find the commit without magic
+ # strings
+ #
+ prefix = ""
+ if opts['stdin']:
+ try:
+ (type, r) = raw_input().split(' ')
+ prefix = " "
+ except EOFError:
+ return
+
+ else:
+ if not type or not r:
+ ui.warn(_("cat-file: type or revision not supplied\n"))
+ commands.help_(ui, 'cat-file')
+
+ while r:
+ if type != "commit":
+ ui.warn(_("aborting hg cat-file only understands commits\n"))
+ return 1;
+ n = repo.lookup(r)
+ catcommit(ui, repo, n, prefix)
+ if opts['stdin']:
+ try:
+ (type, r) = raw_input().split(' ')
+ except EOFError:
+ break
+ else:
+ break
+
+# git rev-tree is a confusing thing. You can supply a number of
+# commit sha1s on the command line, and it walks the commit history
+# telling you which commits are reachable from the supplied ones via
+# a bitmask based on arg position.
+# you can specify a commit to stop at by starting the sha1 with ^
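+# e.g. given roots A and B, a commit reachable only from A has mask 1,
+# only from B mask 2, and from both mask 3 (illustrative)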
+def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
+ def chlogwalk():
+ count = len(repo)
+ i = count
+ l = [0] * 100
+ chunk = 100
+ while True:
+ if chunk > i:
+ chunk = i
+ i = 0
+ else:
+ i -= chunk
+
+ for x in xrange(chunk):
+ if i + x >= count:
+ l[chunk - x:] = [0] * (chunk - x)
+ break
+ if full is not None:
+ l[x] = repo[i + x]
+ l[x].changeset() # force reading
+ else:
+ l[x] = 1
+ for x in xrange(chunk-1, -1, -1):
+ if l[x] != 0:
+ yield (i + x, full is not None and l[x] or None)
+ if i == 0:
+ break
+
+ # calculate and return the reachability bitmask for sha
+ def is_reachable(ar, reachable, sha):
+ if len(ar) == 0:
+ return 1
+ mask = 0
+ for i in xrange(len(ar)):
+ if sha in reachable[i]:
+ mask |= 1 << i
+
+ return mask
+
+ reachable = []
+ stop_sha1 = []
+ want_sha1 = []
+ count = 0
+
+ # figure out which commits they are asking for and which ones they
+ # want us to stop on
+ for i, arg in enumerate(args):
+ if arg.startswith('^'):
+ s = repo.lookup(arg[1:])
+ stop_sha1.append(s)
+ want_sha1.append(s)
+ elif arg != 'HEAD':
+ want_sha1.append(repo.lookup(arg))
+
+ # calculate the graph for the supplied commits
+ for i, n in enumerate(want_sha1):
+ reachable.append(set())
+ visit = [n]
+ reachable[i].add(n)
+ while visit:
+ n = visit.pop(0)
+ if n in stop_sha1:
+ continue
+ for p in repo.changelog.parents(n):
+ if p not in reachable[i]:
+ reachable[i].add(p)
+ visit.append(p)
+ if p in stop_sha1:
+ continue
+
+ # walk the repository looking for commits that are in our
+ # reachability graph
+ for i, ctx in chlogwalk():
+ n = repo.changelog.node(i)
+ mask = is_reachable(want_sha1, reachable, n)
+ if mask:
+ parentstr = ""
+ if parents:
+ pp = repo.changelog.parents(n)
+ if pp[0] != nullid:
+ parentstr += " " + short(pp[0])
+ if pp[1] != nullid:
+ parentstr += " " + short(pp[1])
+ if not full:
+ ui.write("%s%s\n" % (short(n), parentstr))
+ elif full == "commit":
+ ui.write("%s%s\n" % (short(n), parentstr))
+ catcommit(ui, repo, n, ' ', ctx)
+ else:
+ (p1, p2) = repo.changelog.parents(n)
+ (h, h1, h2) = map(short, (n, p1, p2))
+ (i1, i2) = map(repo.changelog.rev, (p1, p2))
+
+ date = ctx.date()[0]
+ ui.write("%s %s:%s" % (date, h, mask))
+ mask = is_reachable(want_sha1, reachable, p1)
+ if i1 != nullrev and mask > 0:
+ ui.write("%s:%s " % (h1, mask)),
+ mask = is_reachable(want_sha1, reachable, p2)
+ if i2 != nullrev and mask > 0:
+ ui.write("%s:%s " % (h2, mask))
+ ui.write("\n")
+ if maxnr and count >= maxnr:
+ break
+ count += 1
+
+def revparse(ui, repo, *revs, **opts):
+ """parse given revisions"""
+ def revstr(rev):
+ if rev == 'HEAD':
+ rev = 'tip'
+ return revlog.hex(repo.lookup(rev))
+
+ for r in revs:
+ revrange = r.split(':', 1)
+ ui.write('%s\n' % revstr(revrange[0]))
+ if len(revrange) == 2:
+ ui.write('^%s\n' % revstr(revrange[1]))
+
+# git rev-list tries to order things by date, and has the ability to stop
+# at a given commit without walking the whole repo. TODO add the stop
+# parameter
+def revlist(ui, repo, *revs, **opts):
+ """print revisions"""
+ if opts['header']:
+ full = "commit"
+ else:
+ full = None
+ copy = [x for x in revs]
+ revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
+
+def config(ui, repo, **opts):
+ """print extension options"""
+ def writeopt(name, value):
+ ui.write('k=%s\nv=%s\n' % (name, value))
+
+ writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
+
+
+def view(ui, repo, *etc, **opts):
+ "start interactive history viewer"
+ os.chdir(repo.root)
+ optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
+ cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
+ ui.debug(_("running %s\n") % cmd)
+ util.system(cmd)
+
+cmdtable = {
+ "^view":
+ (view,
+ [('l', 'limit', '', _('limit number of changes displayed'))],
+ _('hg view [-l LIMIT] [REVRANGE]')),
+ "debug-diff-tree":
+ (difftree,
+ [('p', 'patch', None, _('generate patch')),
+ ('r', 'recursive', None, _('recursive')),
+ ('P', 'pretty', None, _('pretty')),
+ ('s', 'stdin', None, _('stdin')),
+ ('C', 'copy', None, _('detect copies')),
+ ('S', 'search', "", _('search'))],
+ _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
+ "debug-cat-file":
+ (catfile,
+ [('s', 'stdin', None, _('stdin'))],
+ _('hg debug-cat-file [OPTION]... TYPE FILE')),
+ "debug-config":
+ (config, [], _('hg debug-config')),
+ "debug-merge-base":
+ (base, [], _('hg debug-merge-base REV REV')),
+ "debug-rev-parse":
+ (revparse,
+ [('', 'default', '', _('ignored'))],
+ _('hg debug-rev-parse REV')),
+ "debug-rev-list":
+ (revlist,
+ [('H', 'header', None, _('header')),
+ ('t', 'topo-order', None, _('topo-order')),
+ ('p', 'parents', None, _('parents')),
+ ('n', 'max-count', 0, _('max-count'))],
+ _('hg debug-rev-list [OPTION]... REV...')),
+}
diff --git a/sys/src/cmd/hg/hgext/highlight/__init__.py b/sys/src/cmd/hg/hgext/highlight/__init__.py
new file mode 100644
index 000000000..65efae3c9
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/highlight/__init__.py
@@ -0,0 +1,60 @@
+# highlight - syntax highlighting in hgweb, based on Pygments
+#
+# Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+# The original module was split into an interface and an implementation
+# file to defer pygments loading and speed up extension setup.
+
+"""syntax highlighting for hgweb (requires Pygments)
+
+It depends on the Pygments syntax highlighting library:
+http://pygments.org/
+
+There is a single configuration option::
+
+ [web]
+ pygments_style = <style>
+
+The default is 'colorful'.
+"""
+
+import highlight
+from mercurial.hgweb import webcommands, webutil, common
+from mercurial import extensions, encoding
+
+def filerevision_highlight(orig, web, tmpl, fctx):
+ mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
+ # only pygmentize for mimetype containing 'html' so we both match
+ # 'text/html' and possibly 'application/xhtml+xml' in the future
+ # so that we don't have to touch the extension when the mimetype
+ # for a template changes; also hgweb optimizes the case that a
+ # raw file is sent using rawfile() and doesn't call us, so we
+ # can't clash with the file's content-type here in case we
+ # pygmentize an HTML file
+ if 'html' in mt:
+ style = web.config('web', 'pygments_style', 'colorful')
+ highlight.pygmentize('fileline', fctx, style, tmpl)
+ return orig(web, tmpl, fctx)
+
+def annotate_highlight(orig, web, req, tmpl):
+ mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
+ if 'html' in mt:
+ fctx = webutil.filectx(web.repo, req)
+ style = web.config('web', 'pygments_style', 'colorful')
+ highlight.pygmentize('annotateline', fctx, style, tmpl)
+ return orig(web, req, tmpl)
+
+def generate_css(web, req, tmpl):
+ pg_style = web.config('web', 'pygments_style', 'colorful')
+ fmter = highlight.HtmlFormatter(style = pg_style)
+ req.respond(common.HTTP_OK, 'text/css')
+ return ['/* pygments_style = %s */\n\n' % pg_style, fmter.get_style_defs('')]
+
+# monkeypatch in the new version
+extensions.wrapfunction(webcommands, '_filerevision', filerevision_highlight)
+extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
+webcommands.highlightcss = generate_css
+webcommands.__all__.append('highlightcss')
diff --git a/sys/src/cmd/hg/hgext/highlight/highlight.py b/sys/src/cmd/hg/hgext/highlight/highlight.py
new file mode 100644
index 000000000..0f767234d
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/highlight/highlight.py
@@ -0,0 +1,60 @@
+# highlight.py - highlight extension implementation file
+#
+# Copyright 2007-2009 Adam Hupp <adam@hupp.org> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+# The original module was split into an interface and an implementation
+# file to defer pygments loading and speed up extension setup.
+
+from mercurial import demandimport
+demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__',])
+from mercurial import util, encoding
+
+from pygments import highlight
+from pygments.util import ClassNotFound
+from pygments.lexers import guess_lexer, guess_lexer_for_filename, TextLexer
+from pygments.formatters import HtmlFormatter
+
+SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" '
+ 'type="text/css" />')
+
+def pygmentize(field, fctx, style, tmpl):
+
+ # append a <link ...> to the syntax highlighting css
+ old_header = ''.join(tmpl('header'))
+ if SYNTAX_CSS not in old_header:
+ new_header = old_header + SYNTAX_CSS
+ tmpl.cache['header'] = new_header
+
+ text = fctx.data()
+ if util.binary(text):
+ return
+
+ # avoid UnicodeDecodeError in pygments
+ text = encoding.tolocal(text)
+
+ # To get multi-line strings right, we can't format line-by-line
+ try:
+ lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
+ encoding=encoding.encoding)
+ except (ClassNotFound, ValueError):
+ try:
+ lexer = guess_lexer(text[:1024], encoding=encoding.encoding)
+ except (ClassNotFound, ValueError):
+ lexer = TextLexer(encoding=encoding.encoding)
+
+ formatter = HtmlFormatter(style=style, encoding=encoding.encoding)
+
+ colorized = highlight(text, lexer, formatter)
+ # strip wrapping div
+ colorized = colorized[:colorized.find('\n</pre>')]
+ colorized = colorized[colorized.find('<pre>')+5:]
+ coloriter = iter(colorized.splitlines())
+
+ tmpl.filters['colorize'] = lambda x: coloriter.next()
+
+ oldl = tmpl.cache[field]
+ newl = oldl.replace('line|escape', 'line|colorize')
+ tmpl.cache[field] = newl
diff --git a/sys/src/cmd/hg/hgext/inotify/__init__.py b/sys/src/cmd/hg/hgext/inotify/__init__.py
new file mode 100644
index 000000000..cc952c2c6
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/inotify/__init__.py
@@ -0,0 +1,109 @@
+# __init__.py - inotify-based status acceleration for Linux
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''accelerate status report using Linux's inotify service'''
+
+# todo: socket permissions
+
+from mercurial.i18n import _
+from mercurial import cmdutil, util
+import server
+from weakref import proxy
+from client import client, QueryFailed
+
+def serve(ui, repo, **opts):
+ '''start an inotify server for this repository'''
+ timeout = opts.get('timeout')
+ if timeout:
+ timeout = float(timeout) * 1e3
+
+ class service(object):
+ def init(self):
+ try:
+ self.master = server.master(ui, repo.dirstate,
+ repo.root, timeout)
+ except server.AlreadyStartedException, inst:
+ raise util.Abort(str(inst))
+
+ def run(self):
+ try:
+ self.master.run()
+ finally:
+ self.master.shutdown()
+
+ service = service()
+ logfile = ui.config('inotify', 'log')
+ cmdutil.service(opts, initfn=service.init, runfn=service.run,
+ logfile=logfile)
+
+def debuginotify(ui, repo, **opts):
+ '''debugging information for inotify extension
+
+ Prints the list of directories being watched by the inotify server.
+ '''
+ cli = client(ui, repo)
+ response = cli.debugquery()
+
+ ui.write(_('directories being watched:\n'))
+ for path in response:
+ ui.write(' %s/\n' % path)
+
+def reposetup(ui, repo):
+ if not hasattr(repo, 'dirstate'):
+ return
+
+ class inotifydirstate(repo.dirstate.__class__):
+
+ # We'll set this to false after an unsuccessful attempt so that
+ # next calls of status() within the same instance don't try again
+ # to start an inotify server if it won't start.
+ _inotifyon = True
+
+ def status(self, match, ignored, clean, unknown=True):
+ files = match.files()
+ if '.' in files:
+ files = []
+ if self._inotifyon and not ignored:
+ cli = client(ui, repo)
+ try:
+ result = cli.statusquery(files, match, False,
+ clean, unknown)
+ except QueryFailed, instr:
+ ui.debug(str(instr))
+ # don't retry within the same hg instance
+ inotifydirstate._inotifyon = False
+ else:
+ if ui.config('inotify', 'debug'):
+ r2 = super(inotifydirstate, self).status(
+ match, False, clean, unknown)
+ for c, a, b in zip('LMARDUIC', result, r2):
+ for f in a:
+ if f not in b:
+ ui.warn('*** inotify: %s +%s\n' % (c, f))
+ for f in b:
+ if f not in a:
+ ui.warn('*** inotify: %s -%s\n' % (c, f))
+ result = r2
+ return result
+ return super(inotifydirstate, self).status(
+ match, ignored, clean, unknown)
+
+ repo.dirstate.__class__ = inotifydirstate
+
+cmdtable = {
+ 'debuginotify':
+ (debuginotify, [], _('hg debuginotify')),
+ '^inserve':
+ (serve,
+ [('d', 'daemon', None, _('run server in background')),
+ ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
+ ('t', 'idle-timeout', '', _('minutes to sit idle before exiting')),
+ ('', 'pid-file', '', _('name of file to write process ID to'))],
+ _('hg inserve [OPTION]...')),
+ }
diff --git a/sys/src/cmd/hg/hgext/inotify/client.py b/sys/src/cmd/hg/hgext/inotify/client.py
new file mode 100644
index 000000000..800d4a3aa
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/inotify/client.py
@@ -0,0 +1,160 @@
+# client.py - inotify status client
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from mercurial.i18n import _
+import common, server
+import errno, os, socket, struct
+
+class QueryFailed(Exception): pass
+
+def start_server(function):
+ """
+ Decorator: try to call the decorated function; if it fails,
+ try to (re)start the inotify server and retry the call.
+ Raise QueryFailed if something went wrong.
+ """
+ def decorated_function(self, *args):
+ result = None
+ try:
+ return function(self, *args)
+ except (OSError, socket.error), err:
+ autostart = self.ui.configbool('inotify', 'autostart', True)
+
+ if err[0] == errno.ECONNREFUSED:
+ self.ui.warn(_('(found dead inotify server socket; '
+ 'removing it)\n'))
+ os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
+ if err[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
+ self.ui.debug(_('(starting inotify server)\n'))
+ try:
+ try:
+ server.start(self.ui, self.dirstate, self.root)
+ except server.AlreadyStartedException, inst:
+ # another process may have started its own
+ # inotify server while this one was starting.
+ self.ui.debug(str(inst))
+ except Exception, inst:
+ self.ui.warn(_('could not start inotify server: '
+ '%s\n') % inst)
+ else:
+ try:
+ return function(self, *args)
+ except socket.error, err:
+ self.ui.warn(_('could not talk to new inotify '
+ 'server: %s\n') % err[-1])
+ elif err[0] in (errno.ECONNREFUSED, errno.ENOENT):
+ # silently ignore normal errors if autostart is False
+ self.ui.debug(_('(inotify server not running)\n'))
+ else:
+ self.ui.warn(_('failed to contact inotify server: %s\n')
+ % err[-1])
+
+ self.ui.traceback()
+ raise QueryFailed('inotify query failed')
+
+ return decorated_function
+
+
+class client(object):
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.dirstate = repo.dirstate
+ self.root = repo.root
+ self.sock = socket.socket(socket.AF_UNIX)
+
+ def _connect(self):
+ sockpath = os.path.join(self.root, '.hg', 'inotify.sock')
+ try:
+ self.sock.connect(sockpath)
+ except socket.error, err:
+ if err[0] == "AF_UNIX path too long":
+ sockpath = os.readlink(sockpath)
+ self.sock.connect(sockpath)
+ else:
+ raise
+
+ def _send(self, type, data):
+ """Sends protocol version number, and the data"""
+ self.sock.sendall(chr(common.version) + type + data)
+
+ self.sock.shutdown(socket.SHUT_WR)
+
+ def _receive(self, type):
+ """
+ Read data, check the version number, extract the headers,
+ and return a tuple (data descriptor, header).
+ Raise QueryFailed on error.
+ """
+ cs = common.recvcs(self.sock)
+ try:
+ version = ord(cs.read(1))
+ except TypeError:
+ # empty answer, assume the server crashed
+ self.ui.warn(_('received empty answer from inotify server\n'))
+ raise QueryFailed('server crashed')
+
+ if version != common.version:
+ self.ui.warn(_('(inotify: received response from incompatible '
+ 'server version %d)\n') % version)
+ raise QueryFailed('incompatible server version')
+
+ readtype = cs.read(4)
+ if readtype != type:
+ self.ui.warn(_('(inotify: received \'%s\' response when expecting'
+ ' \'%s\')\n') % (readtype, type))
+ raise QueryFailed('wrong response type')
+
+ hdrfmt = common.resphdrfmts[type]
+ hdrsize = common.resphdrsizes[type]
+ try:
+ resphdr = struct.unpack(hdrfmt, cs.read(hdrsize))
+ except struct.error:
+ raise QueryFailed('unable to retrieve query response headers')
+
+ return cs, resphdr
+
+ def query(self, type, req):
+ self._connect()
+
+ self._send(type, req)
+
+ return self._receive(type)
+
+ @start_server
+ def statusquery(self, names, match, ignored, clean, unknown=True):
+
+ def genquery():
+ for n in names:
+ yield n
+ states = 'almrx!'
+ if ignored:
+ raise ValueError('this is insanity')
+ if clean: states += 'c'
+ if unknown: states += '?'
+ yield states
+
+ req = '\0'.join(genquery())
+
+ cs, resphdr = self.query('STAT', req)
+
+ def readnames(nbytes):
+ if nbytes:
+ names = cs.read(nbytes)
+ if names:
+ return filter(match, names.split('\0'))
+ return []
+ return map(readnames, resphdr)
+
+ @start_server
+ def debugquery(self):
+ cs, resphdr = self.query('DBUG', '')
+
+ nbytes = resphdr[0]
+ names = cs.read(nbytes)
+ return names.split('\0')
diff --git a/sys/src/cmd/hg/hgext/inotify/common.py b/sys/src/cmd/hg/hgext/inotify/common.py
new file mode 100644
index 000000000..2b18b5f12
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/inotify/common.py
@@ -0,0 +1,51 @@
+# common.py - inotify common protocol code
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import cStringIO, socket, struct
+
+"""
+ Protocol between inotify clients and server:
+
+ Client sending query:
+ 1) send protocol version number
+ 2) send query type (string, 4 letters long)
+ 3) send query parameters:
+ - For STAT, N+1 \0-separated strings:
+ 1) N different names that need checking
+ 2) 1 string containing all the status types to match
+ - No parameter needed for DBUG
+
+ Server sending query answer:
+ 1) send protocol version number
+ 2) send query type
+ 3) send struct.pack'ed headers describing the length of the content:
+ e.g. for STAT, receive 8 integers describing the length of the
+ 8 \0-separated string lists ( one list for each lmar!?ic status type )
+
+"""
+
+version = 2
+
+resphdrfmts = {
+ 'STAT': '>llllllll', # status requests
+ 'DBUG': '>l' # debugging queries
+}
+resphdrsizes = dict((k, struct.calcsize(v))
+ for k, v in resphdrfmts.iteritems())
+
+def recvcs(sock):
+ cs = cStringIO.StringIO()
+ s = True
+ try:
+ while s:
+ s = sock.recv(65536)
+ cs.write(s)
+ finally:
+ sock.shutdown(socket.SHUT_RD)
+ cs.seek(0)
+ return cs
diff --git a/sys/src/cmd/hg/hgext/inotify/linux/__init__.py b/sys/src/cmd/hg/hgext/inotify/linux/__init__.py
new file mode 100644
index 000000000..2fae16ab3
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/inotify/linux/__init__.py
@@ -0,0 +1,41 @@
+# __init__.py - low-level interfaces to the Linux inotify subsystem
+
+# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
+
+# This library is free software; you can redistribute it and/or modify
+# it under the terms of version 2.1 of the GNU Lesser General Public
+# License, incorporated herein by reference.
+
+'''Low-level interface to the Linux inotify subsystem.
+
+The inotify subsystem provides an efficient mechanism for file status
+monitoring and change notification.
+
+This package provides the low-level inotify system call interface and
+associated constants and helper functions.
+
+For a higher-level interface that remains highly efficient, use the
+inotify.watcher package.'''
+
+__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
+
+from _inotify import *
+
+procfs_path = '/proc/sys/fs/inotify'
+
+def _read_procfs_value(name):
+ def read_value():
+ try:
+ return int(open(procfs_path + '/' + name).read())
+ except OSError:
+ return None
+
+ read_value.__doc__ = '''Return the value of the %s setting from /proc.
+
+ If inotify is not enabled on this system, return None.''' % name
+
+ return read_value
+
+max_queued_events = _read_procfs_value('max_queued_events')
+max_user_instances = _read_procfs_value('max_user_instances')
+max_user_watches = _read_procfs_value('max_user_watches')
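+
+# illustrative use:
+# watches = max_user_watches() # -> int, or None if inotify is unavailable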
diff --git a/sys/src/cmd/hg/hgext/inotify/linux/_inotify.c b/sys/src/cmd/hg/hgext/inotify/linux/_inotify.c
new file mode 100644
index 000000000..42502aa0c
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/inotify/linux/_inotify.c
@@ -0,0 +1,608 @@
+/*
+ * _inotify.c - Python extension interfacing to the Linux inotify subsystem
+ *
+ * Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of version 2.1 of the GNU Lesser General
+ * Public License, incorporated herein by reference.
+ */
+
+#include <Python.h>
+#include <alloca.h>
+#include <sys/inotify.h>
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+static PyObject *init(PyObject *self, PyObject *args)
+{
+ PyObject *ret = NULL;
+ int fd = -1;
+
+ if (!PyArg_ParseTuple(args, ":init"))
+ goto bail;
+
+ Py_BEGIN_ALLOW_THREADS
+ fd = inotify_init();
+ Py_END_ALLOW_THREADS
+
+ if (fd == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto bail;
+ }
+
+ ret = PyInt_FromLong(fd);
+ if (ret == NULL)
+ goto bail;
+
+ goto done;
+
+bail:
+ if (fd != -1)
+ close(fd);
+
+ Py_CLEAR(ret);
+
+done:
+ return ret;
+}
+
+PyDoc_STRVAR(
+ init_doc,
+ "init() -> fd\n"
+ "\n"
+ "Initialise an inotify instance.\n"
+ "Return a file descriptor associated with a new inotify event queue.");
+
+static PyObject *add_watch(PyObject *self, PyObject *args)
+{
+ PyObject *ret = NULL;
+ uint32_t mask;
+ int wd = -1;
+ char *path;
+ int fd;
+
+ if (!PyArg_ParseTuple(args, "isI:add_watch", &fd, &path, &mask))
+ goto bail;
+
+ Py_BEGIN_ALLOW_THREADS
+ wd = inotify_add_watch(fd, path, mask);
+ Py_END_ALLOW_THREADS
+
+ if (wd == -1) {
+ PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
+ goto bail;
+ }
+
+ ret = PyInt_FromLong(wd);
+ if (ret == NULL)
+ goto bail;
+
+ goto done;
+
+bail:
+ if (wd != -1)
+ inotify_rm_watch(fd, wd);
+
+ Py_CLEAR(ret);
+
+done:
+ return ret;
+}
+
+PyDoc_STRVAR(
+ add_watch_doc,
+ "add_watch(fd, path, mask) -> wd\n"
+ "\n"
+ "Add a watch to an inotify instance, or modify an existing watch.\n"
+ "\n"
+ " fd: file descriptor returned by init()\n"
+ " path: path to watch\n"
+ " mask: mask of events to watch for\n"
+ "\n"
+ "Return a unique numeric watch descriptor for the inotify instance\n"
+ "mapped by the file descriptor.");
+
+static PyObject *remove_watch(PyObject *self, PyObject *args)
+{
+ PyObject *ret = NULL;
+ uint32_t wd;
+ int fd;
+ int r;
+
+ if (!PyArg_ParseTuple(args, "iI:remove_watch", &fd, &wd))
+ goto bail;
+
+ Py_BEGIN_ALLOW_THREADS
+ r = inotify_rm_watch(fd, wd);
+ Py_END_ALLOW_THREADS
+
+ if (r == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto bail;
+ }
+
+ /* success: return a new reference to None */
+ ret = Py_None;
+ Py_INCREF(ret);
+
+ goto done;
+
+bail:
+ Py_CLEAR(ret);
+
+done:
+ return ret;
+}
+
+PyDoc_STRVAR(
+ remove_watch_doc,
+ "remove_watch(fd, wd)\n"
+ "\n"
+ " fd: file descriptor returned by init()\n"
+ " wd: watch descriptor returned by add_watch()\n"
+ "\n"
+ "Remove a watch associated with the watch descriptor wd from the\n"
+ "inotify instance associated with the file descriptor fd.\n"
+ "\n"
+ "Removing a watch causes an IN_IGNORED event to be generated for this\n"
+ "watch descriptor.");
+
+#define bit_name(x) {x, #x}
+
+static struct {
+ int bit;
+ const char *name;
+ PyObject *pyname;
+} bit_names[] = {
+ bit_name(IN_ACCESS),
+ bit_name(IN_MODIFY),
+ bit_name(IN_ATTRIB),
+ bit_name(IN_CLOSE_WRITE),
+ bit_name(IN_CLOSE_NOWRITE),
+ bit_name(IN_OPEN),
+ bit_name(IN_MOVED_FROM),
+ bit_name(IN_MOVED_TO),
+ bit_name(IN_CREATE),
+ bit_name(IN_DELETE),
+ bit_name(IN_DELETE_SELF),
+ bit_name(IN_MOVE_SELF),
+ bit_name(IN_UNMOUNT),
+ bit_name(IN_Q_OVERFLOW),
+ bit_name(IN_IGNORED),
+ bit_name(IN_ONLYDIR),
+ bit_name(IN_DONT_FOLLOW),
+ bit_name(IN_MASK_ADD),
+ bit_name(IN_ISDIR),
+ bit_name(IN_ONESHOT),
+ {0}
+};
+
+static PyObject *decode_mask(int mask)
+{
+ PyObject *ret = PyList_New(0);
+ int i;
+
+ if (ret == NULL)
+ goto bail;
+
+ for (i = 0; bit_names[i].bit; i++) {
+ if (mask & bit_names[i].bit) {
+ if (bit_names[i].pyname == NULL) {
+ bit_names[i].pyname = PyString_FromString(bit_names[i].name);
+ if (bit_names[i].pyname == NULL)
+ goto bail;
+ }
+ Py_INCREF(bit_names[i].pyname);
+ if (PyList_Append(ret, bit_names[i].pyname) == -1)
+ goto bail;
+ }
+ }
+
+ goto done;
+
+bail:
+ Py_CLEAR(ret);
+
+done:
+ return ret;
+}
+
+static PyObject *pydecode_mask(PyObject *self, PyObject *args)
+{
+ int mask;
+
+ if (!PyArg_ParseTuple(args, "i:decode_mask", &mask))
+ return NULL;
+
+ return decode_mask(mask);
+}
+
+PyDoc_STRVAR(
+ decode_mask_doc,
+ "decode_mask(mask) -> list_of_strings\n"
+ "\n"
+ "Decode an inotify mask value into a list of strings that give the\n"
+ "name of each bit set in the mask.");
+
+static char doc[] = "Low-level inotify interface wrappers.";
+
+static void define_const(PyObject *dict, const char *name, uint32_t val)
+{
+ PyObject *pyval = PyInt_FromLong(val);
+ PyObject *pyname = PyString_FromString(name);
+
+ if (!pyname || !pyval)
+ goto bail;
+
+ PyDict_SetItem(dict, pyname, pyval);
+
+bail:
+ Py_XDECREF(pyname);
+ Py_XDECREF(pyval);
+}
+
+static void define_consts(PyObject *dict)
+{
+ define_const(dict, "IN_ACCESS", IN_ACCESS);
+ define_const(dict, "IN_MODIFY", IN_MODIFY);
+ define_const(dict, "IN_ATTRIB", IN_ATTRIB);
+ define_const(dict, "IN_CLOSE_WRITE", IN_CLOSE_WRITE);
+ define_const(dict, "IN_CLOSE_NOWRITE", IN_CLOSE_NOWRITE);
+ define_const(dict, "IN_OPEN", IN_OPEN);
+ define_const(dict, "IN_MOVED_FROM", IN_MOVED_FROM);
+ define_const(dict, "IN_MOVED_TO", IN_MOVED_TO);
+
+ define_const(dict, "IN_CLOSE", IN_CLOSE);
+ define_const(dict, "IN_MOVE", IN_MOVE);
+
+ define_const(dict, "IN_CREATE", IN_CREATE);
+ define_const(dict, "IN_DELETE", IN_DELETE);
+ define_const(dict, "IN_DELETE_SELF", IN_DELETE_SELF);
+ define_const(dict, "IN_MOVE_SELF", IN_MOVE_SELF);
+ define_const(dict, "IN_UNMOUNT", IN_UNMOUNT);
+ define_const(dict, "IN_Q_OVERFLOW", IN_Q_OVERFLOW);
+ define_const(dict, "IN_IGNORED", IN_IGNORED);
+
+ define_const(dict, "IN_ONLYDIR", IN_ONLYDIR);
+ define_const(dict, "IN_DONT_FOLLOW", IN_DONT_FOLLOW);
+ define_const(dict, "IN_MASK_ADD", IN_MASK_ADD);
+ define_const(dict, "IN_ISDIR", IN_ISDIR);
+ define_const(dict, "IN_ONESHOT", IN_ONESHOT);
+ define_const(dict, "IN_ALL_EVENTS", IN_ALL_EVENTS);
+}
+
+struct event {
+ PyObject_HEAD
+ PyObject *wd;
+ PyObject *mask;
+ PyObject *cookie;
+ PyObject *name;
+};
+
+static PyObject *event_wd(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *) self;
+ Py_INCREF(evt->wd);
+ return evt->wd;
+}
+
+static PyObject *event_mask(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *) self;
+ Py_INCREF(evt->mask);
+ return evt->mask;
+}
+
+static PyObject *event_cookie(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *) self;
+ Py_INCREF(evt->cookie);
+ return evt->cookie;
+}
+
+static PyObject *event_name(PyObject *self, void *x)
+{
+ struct event *evt = (struct event *) self;
+ Py_INCREF(evt->name);
+ return evt->name;
+}
+
+static struct PyGetSetDef event_getsets[] = {
+ {"wd", event_wd, NULL,
+ "watch descriptor"},
+ {"mask", event_mask, NULL,
+ "event mask"},
+ {"cookie", event_cookie, NULL,
+ "rename cookie, if rename-related event"},
+ {"name", event_name, NULL,
+ "file name"},
+ {NULL}
+};
+
+PyDoc_STRVAR(
+ event_doc,
+ "event: Structure describing an inotify event.");
+
+static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
+{
+ return (*t->tp_alloc)(t, 0);
+}
+
+static void event_dealloc(struct event *evt)
+{
+ Py_XDECREF(evt->wd);
+ Py_XDECREF(evt->mask);
+ Py_XDECREF(evt->cookie);
+ Py_XDECREF(evt->name);
+
+ (*evt->ob_type->tp_free)(evt);
+}
+
+static PyObject *event_repr(struct event *evt)
+{
+ int wd = PyInt_AsLong(evt->wd);
+ int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
+ PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
+ PyObject *join = NULL;
+ char *maskstr;
+
+ join = PyString_FromString("|");
+ if (join == NULL)
+ goto bail;
+
+ pymasks = decode_mask(PyInt_AsLong(evt->mask));
+ if (pymasks == NULL)
+ goto bail;
+
+ pymask = _PyString_Join(join, pymasks);
+ if (pymask == NULL)
+ goto bail;
+
+ maskstr = PyString_AsString(pymask);
+
+ if (evt->name != Py_None) {
+ PyObject *pyname = PyString_Repr(evt->name, 1);
+ char *name = pyname ? PyString_AsString(pyname) : "???";
+
+ if (cookie == -1)
+ ret = PyString_FromFormat("event(wd=%d, mask=%s, name=%s)",
+ wd, maskstr, name);
+ else
+ ret = PyString_FromFormat("event(wd=%d, mask=%s, "
+ "cookie=0x%x, name=%s)",
+ wd, maskstr, cookie, name);
+
+ Py_XDECREF(pyname);
+ } else {
+ if (cookie == -1)
+ ret = PyString_FromFormat("event(wd=%d, mask=%s)",
+ wd, maskstr);
+ else {
+ ret = PyString_FromFormat("event(wd=%d, mask=%s, cookie=0x%x)",
+ wd, maskstr, cookie);
+ }
+ }
+
+ goto done;
+bail:
+ Py_CLEAR(ret);
+
+done:
+ Py_XDECREF(pymask);
+ Py_XDECREF(pymasks);
+ Py_XDECREF(join);
+
+ return ret;
+}
+
+static PyTypeObject event_type = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "_inotify.event", /*tp_name*/
+ sizeof(struct event), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor)event_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ (reprfunc)event_repr, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ event_doc, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ event_getsets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ event_new, /* tp_new */
+};
+
+PyObject *read_events(PyObject *self, PyObject *args)
+{
+ PyObject *ctor_args = NULL;
+ PyObject *pybufsize = NULL;
+ PyObject *ret = NULL;
+ int bufsize = 65536;
+ char *buf = NULL;
+ int nread, pos;
+ int fd;
+
+ if (!PyArg_ParseTuple(args, "i|O:read", &fd, &pybufsize))
+ goto bail;
+
+ if (pybufsize && pybufsize != Py_None)
+ bufsize = PyInt_AsLong(pybufsize);
+
+ ret = PyList_New(0);
+ if (ret == NULL)
+ goto bail;
+
+ if (bufsize <= 0) {
+ int r;
+
+ Py_BEGIN_ALLOW_THREADS
+ r = ioctl(fd, FIONREAD, &bufsize);
+ Py_END_ALLOW_THREADS
+
+ if (r == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto bail;
+ }
+ if (bufsize == 0)
+ goto done;
+ }
+ else {
+ static long name_max;
+ static long name_fd = -1;
+ long min;
+
+ if (name_fd != fd) {
+ name_fd = fd;
+ Py_BEGIN_ALLOW_THREADS
+ name_max = fpathconf(fd, _PC_NAME_MAX);
+ Py_END_ALLOW_THREADS
+ }
+
+ min = sizeof(struct inotify_event) + name_max + 1;
+
+ if (bufsize < min) {
+ PyErr_Format(PyExc_ValueError, "bufsize must be at least %d",
+ (int) min);
+ goto bail;
+ }
+ }
+
+ buf = alloca(bufsize);
+
+ Py_BEGIN_ALLOW_THREADS
+ nread = read(fd, buf, bufsize);
+ Py_END_ALLOW_THREADS
+
+ if (nread == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ goto bail;
+ }
+
+ ctor_args = PyTuple_New(0);
+
+ if (ctor_args == NULL)
+ goto bail;
+
+ pos = 0;
+
+ while (pos < nread) {
+ struct inotify_event *in = (struct inotify_event *) (buf + pos);
+ struct event *evt;
+ PyObject *obj;
+
+ obj = PyObject_CallObject((PyObject *) &event_type, ctor_args);
+
+ if (obj == NULL)
+ goto bail;
+
+ evt = (struct event *) obj;
+
+ evt->wd = PyInt_FromLong(in->wd);
+ evt->mask = PyInt_FromLong(in->mask);
+ if (in->mask & IN_MOVE)
+ evt->cookie = PyInt_FromLong(in->cookie);
+ else {
+ Py_INCREF(Py_None);
+ evt->cookie = Py_None;
+ }
+ if (in->len)
+ evt->name = PyString_FromString(in->name);
+ else {
+ Py_INCREF(Py_None);
+ evt->name = Py_None;
+ }
+
+ if (!evt->wd || !evt->mask || !evt->cookie || !evt->name)
+ goto mybail;
+
+ if (PyList_Append(ret, obj) == -1)
+ goto mybail;
+
+ pos += sizeof(struct inotify_event) + in->len;
+ continue;
+
+ mybail:
+ Py_CLEAR(evt->wd);
+ Py_CLEAR(evt->mask);
+ Py_CLEAR(evt->cookie);
+ Py_CLEAR(evt->name);
+ Py_DECREF(obj);
+
+ goto bail;
+ }
+
+ goto done;
+
+bail:
+ Py_CLEAR(ret);
+
+done:
+ Py_XDECREF(ctor_args);
+
+ return ret;
+}
+
+PyDoc_STRVAR(
+ read_doc,
+ "read(fd, bufsize[=65536]) -> list_of_events\n"
+ "\n"
+ "\nRead inotify events from a file descriptor.\n"
+ "\n"
+ " fd: file descriptor returned by init()\n"
+ " bufsize: size of buffer to read into, in bytes\n"
+ "\n"
+ "Return a list of event objects.\n"
+ "\n"
+ "If bufsize is > 0, block until events are available to be read.\n"
+ "Otherwise, immediately return all events that can be read without\n"
+ "blocking.");
+
+
+static PyMethodDef methods[] = {
+ {"init", init, METH_VARARGS, init_doc},
+ {"add_watch", add_watch, METH_VARARGS, add_watch_doc},
+ {"remove_watch", remove_watch, METH_VARARGS, remove_watch_doc},
+ {"read", read_events, METH_VARARGS, read_doc},
+ {"decode_mask", pydecode_mask, METH_VARARGS, decode_mask_doc},
+ {NULL},
+};
+
+void init_inotify(void)
+{
+ PyObject *mod, *dict;
+
+ if (PyType_Ready(&event_type) == -1)
+ return;
+
+ mod = Py_InitModule3("_inotify", methods, doc);
+
+ dict = PyModule_GetDict(mod);
+
+ if (dict)
+ define_consts(dict);
+}
diff --git a/sys/src/cmd/hg/hgext/inotify/linux/watcher.py b/sys/src/cmd/hg/hgext/inotify/linux/watcher.py
new file mode 100644
index 000000000..5695f8686
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/inotify/linux/watcher.py
@@ -0,0 +1,335 @@
+# watcher.py - high-level interfaces to the Linux inotify subsystem
+
+# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
+
+# This library is free software; you can redistribute it and/or modify
+# it under the terms of version 2.1 of the GNU Lesser General Public
+# License, incorporated herein by reference.
+
+'''High-level interfaces to the Linux inotify subsystem.
+
+The inotify subsystem provides an efficient mechanism for file status
+monitoring and change notification.
+
+The watcher class hides the low-level details of the inotify
+interface, and provides a Pythonic wrapper around it. It generates
+events that provide somewhat more information than raw inotify makes
+available.
+
+The autowatcher class is more useful, as it automatically watches
+newly-created directories on your behalf.'''
+
+__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
+
+import _inotify as inotify
+import array
+import errno
+import fcntl
+import os
+import termios
+
+
+class event(object):
+ '''Derived inotify event class.
+
+ The following fields are available:
+
+ mask: event mask, indicating what kind of event this is
+
+ cookie: rename cookie, if a rename-related event
+
+ path: path of the directory in which the event occurred
+
+ name: name of the directory entry affected by the event
+ (may be None if the event happened to a watched directory)
+
+ fullpath: complete path at which the event occurred
+
+ wd: watch descriptor that triggered this event'''
+
+ __slots__ = (
+ 'cookie',
+ 'fullpath',
+ 'mask',
+ 'name',
+ 'path',
+ 'raw',
+ 'wd',
+ )
+
+ def __init__(self, raw, path):
+ self.path = path
+ self.raw = raw
+ if raw.name:
+ self.fullpath = path + '/' + raw.name
+ else:
+ self.fullpath = path
+
+ self.wd = raw.wd
+ self.mask = raw.mask
+ self.cookie = raw.cookie
+ self.name = raw.name
+
+ def __repr__(self):
+ r = repr(self.raw)
+ return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:]
+
+
+_event_props = {
+ 'access': 'File was accessed',
+ 'modify': 'File was modified',
+ 'attrib': 'Attribute of a directory entry was changed',
+ 'close_write': 'File was closed after being written to',
+ 'close_nowrite': 'File was closed without being written to',
+ 'open': 'File was opened',
+ 'moved_from': 'Directory entry was renamed from this name',
+ 'moved_to': 'Directory entry was renamed to this name',
+ 'create': 'Directory entry was created',
+ 'delete': 'Directory entry was deleted',
+ 'delete_self': 'The watched directory entry was deleted',
+ 'move_self': 'The watched directory entry was renamed',
+ 'unmount': 'Directory was unmounted, and can no longer be watched',
+ 'q_overflow': 'Kernel dropped events due to queue overflow',
+ 'ignored': 'Directory entry is no longer being watched',
+ 'isdir': 'Event occurred on a directory',
+ }
+
+for k, v in _event_props.iteritems():
+ mask = getattr(inotify, 'IN_' + k.upper())
+ # bind the current mask via a default argument; a plain closure
+ # would see only the final value of the loop variable
+ def getter(self, mask=mask):
+ return self.mask & mask
+ getter.__name__ = k
+ getter.__doc__ = v
+ setattr(event, k, property(getter, doc=v))
+
+del _event_props
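+
+# For example, given an event whose raw mask is IN_CREATE | IN_ISDIR,
+# the generated properties behave like:
+#
+#     evt.create   # truthy (IN_CREATE bit is set)
+#     evt.isdir    # truthy
+#     evt.delete   # falsy (0)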
+
+
+class watcher(object):
+ '''Provide a Pythonic interface to the low-level inotify API.
+
+ Also adds derived information to each event that is not available
+ through the normal inotify API, such as directory name.'''
+
+ __slots__ = (
+ 'fd',
+ '_paths',
+ '_wds',
+ )
+
+ def __init__(self):
+ '''Create a new inotify instance.'''
+
+ self.fd = inotify.init()
+ self._paths = {}
+ self._wds = {}
+
+ def fileno(self):
+ '''Return the file descriptor this watcher uses.
+
+ Useful for passing to select and poll.'''
+
+ return self.fd
+
+ def add(self, path, mask):
+ '''Add or modify a watch.
+
+ Return the watch descriptor added or modified.'''
+
+ path = os.path.normpath(path)
+ wd = inotify.add_watch(self.fd, path, mask)
+ self._paths[path] = wd, mask
+ self._wds[wd] = path, mask
+ return wd
+
+ def remove(self, wd):
+ '''Remove the given watch.'''
+
+ inotify.remove_watch(self.fd, wd)
+ self._remove(wd)
+
+ def _remove(self, wd):
+ path_mask = self._wds.pop(wd, None)
+ if path_mask is not None:
+ self._paths.pop(path_mask[0])
+
+ def path(self, path):
+ '''Return a (watch descriptor, event mask) pair for the given path.
+
+ If the path is not being watched, return None.'''
+
+ return self._paths.get(path)
+
+ def wd(self, wd):
+ '''Return a (path, event mask) pair for the given watch descriptor.
+
+ If the watch descriptor is not valid or not associated with
+ this watcher, return None.'''
+
+ return self._wds.get(wd)
+
+ def read(self, bufsize=None):
+ '''Read a list of queued inotify events.
+
+ If bufsize is zero, only return those events that can be read
+ immediately without blocking. Otherwise, block until events are
+ available.'''
+
+ events = []
+ for evt in inotify.read(self.fd, bufsize):
+ events.append(event(evt, self._wds[evt.wd][0]))
+ if evt.mask & inotify.IN_IGNORED:
+ self._remove(evt.wd)
+ elif evt.mask & inotify.IN_UNMOUNT:
+ self.close()
+ return events
+
+ def close(self):
+ '''Shut down this watcher.
+
+ All subsequent method calls are likely to raise exceptions.'''
+
+ os.close(self.fd)
+ self.fd = None
+ self._paths = None
+ self._wds = None
+
+ def __len__(self):
+ '''Return the number of active watches.'''
+
+ return len(self._paths)
+
+ def __iter__(self):
+ '''Yield a (path, watch descriptor, event mask) tuple for each
+ entry being watched.'''
+
+ for path, (wd, mask) in self._paths.iteritems():
+ yield path, wd, mask
+
+ def __del__(self):
+ if self.fd is not None:
+ os.close(self.fd)
+
+ ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
+
+ def add_iter(self, path, mask, onerror=None):
+ '''Add or modify watches over path and its subdirectories.
+
+ Yield each added or modified watch descriptor.
+
+ To ensure that this method runs to completion, you must
+ iterate over all of its results, even if you do not care what
+ they are. For example:
+
+ for wd in w.add_iter(path, mask):
+ pass
+
+ By default, errors are ignored. If optional arg "onerror" is
+ specified, it should be a function; it will be called with one
+ argument, an OSError instance. It can report the error to
+ continue with the walk, or raise the exception to abort the
+ walk.'''
+
+ # Add the IN_ONLYDIR flag to the event mask, to avoid a possible
+ # race when adding a subdirectory. In the time between the
+ # event being queued by the kernel and us processing it, the
+ # directory may have been deleted, or replaced with a different
+ # kind of entry with the same name.
+
+ submask = mask | inotify.IN_ONLYDIR
+
+ try:
+ yield self.add(path, mask)
+ except OSError, err:
+ if onerror and err.errno not in self.ignored_errors:
+ onerror(err)
+ for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
+ for d in dirs:
+ try:
+ yield self.add(root + '/' + d, submask)
+ except OSError, err:
+ if onerror and err.errno not in self.ignored_errors:
+ onerror(err)
+
+ def add_all(self, path, mask, onerror=None):
+ '''Add or modify watches over path and its subdirectories.
+
+ Return a list of added or modified watch descriptors.
+
+ By default, errors are ignored. If optional arg "onerror" is
+ specified, it should be a function; it will be called with one
+ argument, an OSError instance. It can report the error to
+ continue with the walk, or raise the exception to abort the
+ walk.'''
+
+ return [w for w in self.add_iter(path, mask, onerror)]
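+
+# A minimal usage sketch (hypothetical path; the mask constants come
+# from the _inotify module imported above as "inotify"):
+#
+#     w = watcher()
+#     w.add_all('/some/dir', inotify.IN_ALL_EVENTS)
+#     for evt in w.read():
+#         print evt.fullpath, evt.mask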
+
+
+class autowatcher(watcher):
+ '''watcher class that automatically watches newly created directories.'''
+
+ __slots__ = (
+ 'addfilter',
+ )
+
+ def __init__(self, addfilter=None):
+ '''Create a new inotify instance.
+
+ This instance will automatically watch newly created
+ directories.
+
+ If the optional addfilter parameter is not None, it must be a
+ callable that takes one parameter. It will be called each time
+ a directory is about to be automatically watched. If it returns
+ True, the directory will be watched if it still exists;
+ otherwise, it will be skipped.'''
+
+ super(autowatcher, self).__init__()
+ self.addfilter = addfilter
+
+ _dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
+
+ def read(self, bufsize=None):
+ events = super(autowatcher, self).read(bufsize)
+ for evt in events:
+ if evt.mask & self._dir_create_mask == self._dir_create_mask:
+ if self.addfilter is None or self.addfilter(evt):
+ parentmask = self._wds[evt.wd][1]
+ # See note about race avoidance via IN_ONLYDIR above.
+ mask = parentmask | inotify.IN_ONLYDIR
+ try:
+ self.add_all(evt.fullpath, mask)
+ except OSError, err:
+ if err.errno not in self.ignored_errors:
+ raise
+ return events
+
+
+class threshold(object):
+ '''Class that indicates whether a file descriptor has reached a
+ threshold of readable bytes available.
+
+ This class is not thread-safe.'''
+
+ __slots__ = (
+ 'fd',
+ 'threshold',
+ '_iocbuf',
+ )
+
+ def __init__(self, fd, threshold=1024):
+ self.fd = fd
+ self.threshold = threshold
+ self._iocbuf = array.array('i', [0])
+
+ def readable(self):
+ '''Return the number of bytes readable on this file descriptor.'''
+
+ fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
+ return self._iocbuf[0]
+
+ def __call__(self):
+ '''Indicate whether the number of readable bytes has met or
+ exceeded the threshold.'''
+
+ return self.readable() >= self.threshold
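+
+# A sketch of combining threshold with a poll loop (hypothetical wiring;
+# server.py in this extension does the real equivalent):
+#
+#     import select
+#     w = watcher()
+#     t = threshold(w.fileno())
+#     p = select.poll()
+#     p.register(w.fileno(), select.POLLIN)
+#     if p.poll(0) and t():
+#         events = w.read(0)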
diff --git a/sys/src/cmd/hg/hgext/inotify/server.py b/sys/src/cmd/hg/hgext/inotify/server.py
new file mode 100644
index 000000000..75c00d632
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/inotify/server.py
@@ -0,0 +1,874 @@
+# server.py - inotify status server
+#
+# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from mercurial.i18n import _
+from mercurial import osutil, util
+import common
+import errno, os, select, socket, stat, struct, sys, tempfile, time
+
+try:
+ import linux as inotify
+ from linux import watcher
+except ImportError:
+ raise
+
+class AlreadyStartedException(Exception): pass
+
+def join(a, b):
+ if a:
+ if a[-1] == '/':
+ return a + b
+ return a + '/' + b
+ return b
+
+def split(path):
+ c = path.rfind('/')
+ if c == -1:
+ return '', path
+ return path[:c], path[c+1:]
+
+walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG)
+
+def walkrepodirs(dirstate, absroot):
+ '''Iterate over all subdirectories of this repo.
+ Exclude the .hg directory, any nested repos, and ignored dirs.'''
+ def walkit(dirname, top):
+ fullpath = join(absroot, dirname)
+ try:
+ for name, kind in osutil.listdir(fullpath):
+ if kind == stat.S_IFDIR:
+ if name == '.hg':
+ if not top:
+ return
+ else:
+ d = join(dirname, name)
+ if dirstate._ignore(d):
+ continue
+ for subdir in walkit(d, False):
+ yield subdir
+ except OSError, err:
+ if err.errno not in walk_ignored_errors:
+ raise
+ yield fullpath
+
+ return walkit('', True)
+
+def walk(dirstate, absroot, root):
+ '''Like os.walk, but only yields regular files.'''
+
+ # This function is critical to performance during startup.
+
+ def walkit(root, reporoot):
+ files, dirs = [], []
+
+ try:
+ fullpath = join(absroot, root)
+ for name, kind in osutil.listdir(fullpath):
+ if kind == stat.S_IFDIR:
+ if name == '.hg':
+ if not reporoot:
+ return
+ else:
+ dirs.append(name)
+ path = join(root, name)
+ if dirstate._ignore(path):
+ continue
+ for result in walkit(path, False):
+ yield result
+ elif kind in (stat.S_IFREG, stat.S_IFLNK):
+ files.append(name)
+ yield fullpath, dirs, files
+
+ except OSError, err:
+ if err.errno == errno.ENOTDIR:
+ # fullpath was a directory, but has since been replaced
+ # by a file.
+ yield fullpath, dirs, files
+ elif err.errno not in walk_ignored_errors:
+ raise
+
+ return walkit(root, root == '')
+
+def _explain_watch_limit(ui, dirstate, rootabs):
+ path = '/proc/sys/fs/inotify/max_user_watches'
+ try:
+ limit = int(file(path).read())
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ raise util.Abort(_('this system does not seem to '
+ 'support inotify'))
+ ui.warn(_('*** the current per-user limit on the number '
+ 'of inotify watches is %s\n') % limit)
+ ui.warn(_('*** this limit is too low to watch every '
+ 'directory in this repository\n'))
+ ui.warn(_('*** counting directories: '))
+ ndirs = len(list(walkrepodirs(dirstate, rootabs)))
+ ui.warn(_('found %d\n') % ndirs)
+ newlimit = min(limit, 1024)
+ while newlimit < ((limit + ndirs) * 1.1):
+ newlimit *= 2
+ ui.warn(_('*** to raise the limit from %d to %d (run as root):\n') %
+ (limit, newlimit))
+ ui.warn(_('*** echo %d > %s\n') % (newlimit, path))
+ raise util.Abort(_('cannot watch %s until inotify watch limit is raised')
+ % rootabs)
+
+class pollable(object):
+ """
+ Interface to support polling.
+ The file descriptor returned by fileno() is registered to a polling
+ object.
+ Usage:
+ Every tick, check if an event has happened since the last tick:
+ * If yes, call handle_pollevents
+ * If no, call handle_timeout
+ """
+ poll_events = select.POLLIN
+ instances = {}
+ poll = select.poll()
+
+ def fileno(self):
+ raise NotImplementedError
+
+ def handle_pollevents(self, events):
+ raise NotImplementedError
+
+ def handle_timeout(self):
+ raise NotImplementedError
+
+ def shutdown(self):
+ raise NotImplementedError
+
+ def register(self, timeout):
+ fd = self.fileno()
+
+ pollable.poll.register(fd, pollable.poll_events)
+ pollable.instances[fd] = self
+
+ self.registered = True
+ self.timeout = timeout
+
+ def unregister(self):
+ pollable.poll.unregister(self)
+ self.registered = False
+
+ @classmethod
+ def run(cls):
+ while True:
+ timeout = None
+ timeobj = None
+ for obj in cls.instances.itervalues():
+ if obj.timeout is not None and (timeout is None or obj.timeout < timeout):
+ timeout, timeobj = obj.timeout, obj
+ try:
+ events = cls.poll.poll(timeout)
+ except select.error, err:
+ if err[0] == errno.EINTR:
+ continue
+ raise
+ if events:
+ by_fd = {}
+ for fd, event in events:
+ by_fd.setdefault(fd, []).append(event)
+
+ for fd, events in by_fd.iteritems():
+ cls.instances[fd].handle_pollevents(events)
+
+ elif timeobj:
+ timeobj.handle_timeout()
+
+def eventaction(code):
+ """
+ Decorator to help handle events in repowatcher
+ """
+ def decorator(f):
+ def wrapper(self, wpath):
+ if code == 'm' and wpath in self.lastevent and \
+ self.lastevent[wpath] in 'cm':
+ return
+ self.lastevent[wpath] = code
+ self.timeout = 250
+
+ f(self, wpath)
+
+ wrapper.func_name = f.func_name
+ return wrapper
+ return decorator
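+
+# Effect sketch: within one batch, a second 'm' event for the same path
+# returns early because lastevent[wpath] is already in 'cm', so the
+# handler body runs at most once per path until read_events() clears
+# lastevent.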
+
+class directory(object):
+ """
+ Representing a directory
+
+ * path is the relative path from repo root to this directory
+ * files is a dict listing the files in this directory
+ - keys are file names
+ - values are file statuses
+ * dirs is a dict listing the subdirectories
+ - keys are subdirectory names
+ - values are directory objects
+ """
+ def __init__(self, relpath=''):
+ self.path = relpath
+ self.files = {}
+ self.dirs = {}
+
+ def dir(self, relpath):
+ """
+ Returns the directory contained at the relative path relpath.
+ Creates the intermediate directories if necessary.
+ """
+ if not relpath:
+ return self
+ l = relpath.split('/')
+ ret = self
+ while l:
+ next = l.pop(0)
+ try:
+ ret = ret.dirs[next]
+ except KeyError:
+ d = directory(join(ret.path, next))
+ ret.dirs[next] = d
+ ret = d
+ return ret
+
+ def walk(self, states):
+ """
+ yield (filename, status) pairs for items in the tree
+ that have a status in states.
+ Filenames are relative to the repo root.
+ """
+ for file, st in self.files.iteritems():
+ if st in states:
+ yield join(self.path, file), st
+ for dir in self.dirs.itervalues():
+ for e in dir.walk(states):
+ yield e
+
+ def lookup(self, states, path):
+ """
+ yield root-relative filenames that match path and whose
+ status is in states:
+ * if path is a file, yield path
+ * if path is a directory, yield directory files
+ * if path is not tracked, yield nothing
+ """
+ if path[-1] == '/':
+ path = path[:-1]
+
+ paths = path.split('/')
+
+ # we need to check separately for last node
+ last = paths.pop()
+
+ tree = self
+ try:
+ for dir in paths:
+ tree = tree.dirs[dir]
+ except KeyError:
+ # path is not tracked
+ return
+
+ try:
+ # if path is a directory, walk it
+ for file, st in tree.dirs[last].walk(states):
+ yield file
+ except KeyError:
+ try:
+ if tree.files[last] in states:
+ # path is a file
+ yield path
+ except KeyError:
+ # path is not tracked
+ pass
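+
+# A small illustration of the tree container (hypothetical entries):
+#
+#     d = directory()
+#     d.dir('a/b').files['c'] = 'n'  # track a/b/c with status 'n'
+#     list(d.walk('n'))              # -> [('a/b/c', 'n')]
+#     list(d.lookup('n', 'a/b'))     # -> ['a/b/c']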
+
+class repowatcher(pollable):
+ """
+ Watches inotify events
+ """
+ statuskeys = 'almr!?'
+ mask = (
+ inotify.IN_ATTRIB |
+ inotify.IN_CREATE |
+ inotify.IN_DELETE |
+ inotify.IN_DELETE_SELF |
+ inotify.IN_MODIFY |
+ inotify.IN_MOVED_FROM |
+ inotify.IN_MOVED_TO |
+ inotify.IN_MOVE_SELF |
+ inotify.IN_ONLYDIR |
+ inotify.IN_UNMOUNT |
+ 0)
+
+ def __init__(self, ui, dirstate, root):
+ self.ui = ui
+ self.dirstate = dirstate
+
+ self.wprefix = join(root, '')
+ self.prefixlen = len(self.wprefix)
+ try:
+ self.watcher = watcher.watcher()
+ except OSError, err:
+ raise util.Abort(_('inotify service not available: %s') %
+ err.strerror)
+ self.threshold = watcher.threshold(self.watcher)
+ self.fileno = self.watcher.fileno
+
+ self.tree = directory()
+ self.statcache = {}
+ self.statustrees = dict([(s, directory()) for s in self.statuskeys])
+
+ self.last_event = None
+
+ self.lastevent = {}
+
+ self.register(timeout=None)
+
+ self.ds_info = self.dirstate_info()
+ self.handle_timeout()
+ self.scan()
+
+ def event_time(self):
+ last = self.last_event
+ now = time.time()
+ self.last_event = now
+
+ if last is None:
+ return 'start'
+ delta = now - last
+ if delta < 5:
+ return '+%.3f' % delta
+ if delta < 50:
+ return '+%.2f' % delta
+ return '+%.1f' % delta
+
+ def dirstate_info(self):
+ try:
+ st = os.lstat(self.wprefix + '.hg/dirstate')
+ return st.st_mtime, st.st_ino
+ except OSError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ return 0, 0
+
+ def add_watch(self, path, mask):
+ if not path:
+ return
+ if self.watcher.path(path) is None:
+ if self.ui.debugflag:
+ self.ui.note(_('watching %r\n') % path[self.prefixlen:])
+ try:
+ self.watcher.add(path, mask)
+ except OSError, err:
+ if err.errno in (errno.ENOENT, errno.ENOTDIR):
+ return
+ if err.errno != errno.ENOSPC:
+ raise
+ _explain_watch_limit(self.ui, self.dirstate, self.wprefix)
+
+ def setup(self):
+ self.ui.note(_('watching directories under %r\n') % self.wprefix)
+ self.add_watch(self.wprefix + '.hg', inotify.IN_DELETE)
+ self.check_dirstate()
+
+ def filestatus(self, fn, st):
+ try:
+ type_, mode, size, time = self.dirstate._map[fn][:4]
+ except KeyError:
+ type_ = '?'
+ if type_ == 'n':
+ st_mode, st_size, st_mtime = st
+ if size == -1:
+ return 'l'
+ if size and (size != st_size or (mode ^ st_mode) & 0100):
+ return 'm'
+ if time != int(st_mtime):
+ return 'l'
+ return 'n'
+ if type_ == '?' and self.dirstate._ignore(fn):
+ return 'i'
+ return type_
+
+ def updatefile(self, wfn, osstat):
+ '''
+ update the file entry of an existing file.
+
+ osstat: (mode, size, time) tuple, as returned by os.lstat(wfn)
+ '''
+
+ self._updatestatus(wfn, self.filestatus(wfn, osstat))
+
+ def deletefile(self, wfn, oldstatus):
+ '''
+ update the entry of a file which has been deleted.
+
+ oldstatus: char in statuskeys, status of the file before deletion
+ '''
+ if oldstatus == 'r':
+ newstatus = 'r'
+ elif oldstatus in 'almn':
+ newstatus = '!'
+ else:
+ newstatus = None
+
+ self.statcache.pop(wfn, None)
+ self._updatestatus(wfn, newstatus)
+
+ def _updatestatus(self, wfn, newstatus):
+ '''
+ Update the stored status of a file.
+
+ newstatus: - char in (statuskeys + 'ni'), new status to apply.
+ - or None, to stop tracking wfn
+ '''
+ root, fn = split(wfn)
+ d = self.tree.dir(root)
+
+ oldstatus = d.files.get(fn)
+ # oldstatus can be either:
+ # - None : fn is new
+ # - a char in statuskeys: fn is a (tracked) file
+
+ if self.ui.debugflag and oldstatus != newstatus:
+ self.ui.note(_('status: %r %s -> %s\n') %
+ (wfn, oldstatus, newstatus))
+
+ if oldstatus and oldstatus in self.statuskeys \
+ and oldstatus != newstatus:
+ del self.statustrees[oldstatus].dir(root).files[fn]
+
+ if newstatus in (None, 'i'):
+ d.files.pop(fn, None)
+ elif oldstatus != newstatus:
+ d.files[fn] = newstatus
+ if newstatus != 'n':
+ self.statustrees[newstatus].dir(root).files[fn] = newstatus
+
+
+ def check_deleted(self, key):
+ # Files that had been deleted but were present in the dirstate
+ # may have vanished from the dirstate; we must clean them up.
+ nuke = []
+ for wfn, ignore in self.statustrees[key].walk(key):
+ if wfn not in self.dirstate:
+ nuke.append(wfn)
+ for wfn in nuke:
+ root, fn = split(wfn)
+ del self.statustrees[key].dir(root).files[fn]
+ del self.tree.dir(root).files[fn]
+
+ def scan(self, topdir=''):
+ ds = self.dirstate._map.copy()
+ self.add_watch(join(self.wprefix, topdir), self.mask)
+ for root, dirs, files in walk(self.dirstate, self.wprefix, topdir):
+ for d in dirs:
+ self.add_watch(join(root, d), self.mask)
+ wroot = root[self.prefixlen:]
+ for fn in files:
+ wfn = join(wroot, fn)
+ self.updatefile(wfn, self.getstat(wfn))
+ ds.pop(wfn, None)
+ wtopdir = topdir
+ if wtopdir and wtopdir[-1] != '/':
+ wtopdir += '/'
+ for wfn, state in ds.iteritems():
+ if not wfn.startswith(wtopdir):
+ continue
+ try:
+ st = self.stat(wfn)
+ except OSError:
+ status = state[0]
+ self.deletefile(wfn, status)
+ else:
+ self.updatefile(wfn, st)
+ self.check_deleted('!')
+ self.check_deleted('r')
+
+ def check_dirstate(self):
+ ds_info = self.dirstate_info()
+ if ds_info == self.ds_info:
+ return
+ self.ds_info = ds_info
+ if not self.ui.debugflag:
+ self.last_event = None
+ self.ui.note(_('%s dirstate reload\n') % self.event_time())
+ self.dirstate.invalidate()
+ self.handle_timeout()
+ self.scan()
+ self.ui.note(_('%s end dirstate reload\n') % self.event_time())
+
+ def update_hgignore(self):
+ # An update of the ignore file can potentially change the
+ # states of all unknown and ignored files.
+
+ # XXX If the user has other ignore files outside the repo, or
+ # changes their list of ignore files at run time, we'll
+ # potentially never see changes to them. We could get the
+ # client to report to us what ignore data they're using.
+ # But it's easier to do nothing than to open that can of
+ # worms.
+
+ if '_ignore' in self.dirstate.__dict__:
+ delattr(self.dirstate, '_ignore')
+ self.ui.note(_('rescanning due to .hgignore change\n'))
+ self.handle_timeout()
+ self.scan()
+
+ def getstat(self, wpath):
+ try:
+ return self.statcache[wpath]
+ except KeyError:
+ try:
+ return self.stat(wpath)
+ except OSError, err:
+ if err.errno != errno.ENOENT:
+ raise
+
+ def stat(self, wpath):
+ try:
+ st = os.lstat(join(self.wprefix, wpath))
+ ret = st.st_mode, st.st_size, st.st_mtime
+ self.statcache[wpath] = ret
+ return ret
+ except OSError:
+ self.statcache.pop(wpath, None)
+ raise
+
+ @eventaction('c')
+ def created(self, wpath):
+ if wpath == '.hgignore':
+ self.update_hgignore()
+ try:
+ st = self.stat(wpath)
+ if stat.S_ISREG(st[0]):
+ self.updatefile(wpath, st)
+ except OSError:
+ pass
+
+ @eventaction('m')
+ def modified(self, wpath):
+ if wpath == '.hgignore':
+ self.update_hgignore()
+ try:
+ st = self.stat(wpath)
+ if stat.S_ISREG(st[0]):
+ if self.dirstate[wpath] in 'lmn':
+ self.updatefile(wpath, st)
+ except OSError:
+ pass
+
+ @eventaction('d')
+ def deleted(self, wpath):
+ if wpath == '.hgignore':
+ self.update_hgignore()
+ elif wpath.startswith('.hg/'):
+ if wpath == '.hg/wlock':
+ self.check_dirstate()
+ return
+
+ self.deletefile(wpath, self.dirstate[wpath])
+
+ def process_create(self, wpath, evt):
+ if self.ui.debugflag:
+ self.ui.note(_('%s event: created %s\n') %
+ (self.event_time(), wpath))
+
+ if evt.mask & inotify.IN_ISDIR:
+ self.scan(wpath)
+ else:
+ self.created(wpath)
+
+ def process_delete(self, wpath, evt):
+ if self.ui.debugflag:
+ self.ui.note(_('%s event: deleted %s\n') %
+ (self.event_time(), wpath))
+
+ if evt.mask & inotify.IN_ISDIR:
+ tree = self.tree.dir(wpath)
+ todelete = [wfn for wfn, ignore in tree.walk('?')]
+ for fn in todelete:
+ self.deletefile(fn, '?')
+ self.scan(wpath)
+ else:
+ self.deleted(wpath)
+
+ def process_modify(self, wpath, evt):
+ if self.ui.debugflag:
+ self.ui.note(_('%s event: modified %s\n') %
+ (self.event_time(), wpath))
+
+ if not (evt.mask & inotify.IN_ISDIR):
+ self.modified(wpath)
+
+ def process_unmount(self, evt):
+ self.ui.warn(_('filesystem containing %s was unmounted\n') %
+ evt.fullpath)
+ sys.exit(0)
+
+ def handle_pollevents(self, events):
+ if self.ui.debugflag:
+ self.ui.note(_('%s readable: %d bytes\n') %
+ (self.event_time(), self.threshold.readable()))
+ if not self.threshold():
+ if self.registered:
+ if self.ui.debugflag:
+ self.ui.note(_('%s below threshold - unhooking\n') %
+ (self.event_time()))
+ self.unregister()
+ self.timeout = 250
+ else:
+ self.read_events()
+
+ def read_events(self, bufsize=None):
+ events = self.watcher.read(bufsize)
+ if self.ui.debugflag:
+ self.ui.note(_('%s reading %d events\n') %
+ (self.event_time(), len(events)))
+ for evt in events:
+ assert evt.fullpath.startswith(self.wprefix)
+ wpath = evt.fullpath[self.prefixlen:]
+
+ # paths have been normalized, wpath never ends with a '/'
+
+ if wpath.startswith('.hg/') and evt.mask & inotify.IN_ISDIR:
+ # ignore subdirectories of .hg/ (merge, patches...)
+ continue
+
+ if evt.mask & inotify.IN_UNMOUNT:
+ self.process_unmount(evt)
+ elif evt.mask & (inotify.IN_MODIFY | inotify.IN_ATTRIB):
+ self.process_modify(wpath, evt)
+ elif evt.mask & (inotify.IN_DELETE | inotify.IN_DELETE_SELF |
+ inotify.IN_MOVED_FROM):
+ self.process_delete(wpath, evt)
+ elif evt.mask & (inotify.IN_CREATE | inotify.IN_MOVED_TO):
+ self.process_create(wpath, evt)
+
+ self.lastevent.clear()
+
+ def handle_timeout(self):
+ if not self.registered:
+ if self.ui.debugflag:
+ self.ui.note(_('%s hooking back up with %d bytes readable\n') %
+ (self.event_time(), self.threshold.readable()))
+ self.read_events(0)
+ self.register(timeout=None)
+
+ self.timeout = None
+
+ def shutdown(self):
+ self.watcher.close()
+
+ def debug(self):
+ """
+ Returns a sorted list of the relative paths currently watched,
+ for debugging purposes.
+ """
+ return sorted(tuple[0][self.prefixlen:] for tuple in self.watcher)
+
+class server(pollable):
+ """
+ Listens for client queries on unix socket inotify.sock
+ """
+ def __init__(self, ui, root, repowatcher, timeout):
+ self.ui = ui
+ self.repowatcher = repowatcher
+ self.sock = socket.socket(socket.AF_UNIX)
+ self.sockpath = join(root, '.hg/inotify.sock')
+ self.realsockpath = None
+ try:
+ self.sock.bind(self.sockpath)
+ except socket.error, err:
+ if err[0] == errno.EADDRINUSE:
+ raise AlreadyStartedException(_('could not start server: %s')
+ % err[1])
+ if err[0] == "AF_UNIX path too long":
+ tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
+ self.realsockpath = os.path.join(tempdir, "inotify.sock")
+ try:
+ self.sock.bind(self.realsockpath)
+ os.symlink(self.realsockpath, self.sockpath)
+ except (OSError, socket.error), inst:
+ try:
+ os.unlink(self.realsockpath)
+ except:
+ pass
+ os.rmdir(tempdir)
+ if inst.errno == errno.EEXIST:
+ raise AlreadyStartedException(_('could not start server: %s')
+ % inst.strerror)
+ raise
+ else:
+ raise
+ self.sock.listen(5)
+ self.fileno = self.sock.fileno
+ self.register(timeout=timeout)
+
+ def handle_timeout(self):
+ pass
+
+ def answer_stat_query(self, cs):
+ names = cs.read().split('\0')
+
+ states = names.pop()
+
+ self.ui.note(_('answering query for %r\n') % states)
+
+ if self.repowatcher.timeout:
+ # We got a query while a rescan is pending. Make sure we
+ # rescan before responding, or we could give back a wrong
+ # answer.
+ self.repowatcher.handle_timeout()
+
+ if not names:
+ def genresult(states, tree):
+ for fn, state in tree.walk(states):
+ yield fn
+ else:
+ def genresult(states, tree):
+ for fn in names:
+ for f in tree.lookup(states, fn):
+ yield f
+
+ return ['\0'.join(r) for r in [
+ genresult('l', self.repowatcher.statustrees['l']),
+ genresult('m', self.repowatcher.statustrees['m']),
+ genresult('a', self.repowatcher.statustrees['a']),
+ genresult('r', self.repowatcher.statustrees['r']),
+ genresult('!', self.repowatcher.statustrees['!']),
+ '?' in states
+ and genresult('?', self.repowatcher.statustrees['?'])
+ or [],
+ [],
+ 'c' in states and genresult('n', self.repowatcher.tree) or [],
+ ]]
+
+ def answer_dbug_query(self):
+ return ['\0'.join(self.repowatcher.debug())]
+
+ def handle_pollevents(self, events):
+ for e in events:
+ self.handle_pollevent()
+
+ def handle_pollevent(self):
+ sock, addr = self.sock.accept()
+
+ cs = common.recvcs(sock)
+ version = ord(cs.read(1))
+
+ if version != common.version:
+ self.ui.warn(_('received query from incompatible client '
+ 'version %d\n') % version)
+ try:
+ # try to send back our version to the client
+ # this way, the client too is informed of the mismatch
+ sock.sendall(chr(common.version))
+ except:
+ pass
+ return
+
+ type = cs.read(4)
+
+ if type == 'STAT':
+ results = self.answer_stat_query(cs)
+ elif type == 'DBUG':
+ results = self.answer_dbug_query()
+ else:
+ self.ui.warn(_('unrecognized query type: %s\n') % type)
+ return
+
+ try:
+ try:
+ v = chr(common.version)
+
+ sock.sendall(v + type + struct.pack(common.resphdrfmts[type],
+ *map(len, results)))
+ sock.sendall(''.join(results))
+ finally:
+ sock.shutdown(socket.SHUT_WR)
+ except socket.error, err:
+ if err[0] != errno.EPIPE:
+ raise
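+
+ # Response framing sketch (the header format strings live in
+ # common.resphdrfmts, defined elsewhere; the layout is inferred
+ # from the code above):
+ #
+ # chr(common.version) + type + struct.pack(hdrfmt, *map(len, results))
+ # + ''.join(results)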
+
+ def shutdown(self):
+ self.sock.close()
+ try:
+ os.unlink(self.sockpath)
+ if self.realsockpath:
+ os.unlink(self.realsockpath)
+ os.rmdir(os.path.dirname(self.realsockpath))
+ except OSError, err:
+ if err.errno != errno.ENOENT:
+ raise
+
+class master(object):
+ def __init__(self, ui, dirstate, root, timeout=None):
+ self.ui = ui
+ self.repowatcher = repowatcher(ui, dirstate, root)
+ self.server = server(ui, root, self.repowatcher, timeout)
+
+ def shutdown(self):
+ for obj in pollable.instances.itervalues():
+ obj.shutdown()
+
+ def run(self):
+ self.repowatcher.setup()
+ self.ui.note(_('finished setup\n'))
+ if os.getenv('TIME_STARTUP'):
+ sys.exit(0)
+ pollable.run()
+
+def start(ui, dirstate, root):
+ def closefds(ignore):
+ # (from python bug #1177468)
+ # close all inherited file descriptors
+ # Python 2.4.1 and later use /dev/urandom to seed the random module's RNG
+ # a file descriptor is kept internally as os._urandomfd (created on demand
+ # the first time os.urandom() is called), and should not be closed
+ try:
+ os.urandom(4)
+ urandom_fd = getattr(os, '_urandomfd', None)
+ except AttributeError:
+ urandom_fd = None
+ ignore.append(urandom_fd)
+ for fd in range(3, 256):
+ if fd in ignore:
+ continue
+ try:
+ os.close(fd)
+ except OSError:
+ pass
+
+ m = master(ui, dirstate, root)
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ pid = os.fork()
+ if pid:
+ return pid
+
+ closefds(pollable.instances.keys())
+ os.setsid()
+
+ fd = os.open('/dev/null', os.O_RDONLY)
+ os.dup2(fd, 0)
+ if fd > 0:
+ os.close(fd)
+
+ fd = os.open(ui.config('inotify', 'log', '/dev/null'),
+ os.O_RDWR | os.O_CREAT | os.O_TRUNC)
+ os.dup2(fd, 1)
+ os.dup2(fd, 2)
+ if fd > 2:
+ os.close(fd)
+
+ try:
+ m.run()
+ finally:
+ m.shutdown()
+ os._exit(0)
diff --git a/sys/src/cmd/hg/hgext/interhg.py b/sys/src/cmd/hg/hgext/interhg.py
new file mode 100644
index 000000000..3660c4081
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/interhg.py
@@ -0,0 +1,80 @@
+# interhg.py - interhg
+#
+# Copyright 2007 OHASHI Hideya <ohachige@gmail.com>
+#
+# Contributor(s):
+# Edward Lee <edward.lee@engineering.uiuc.edu>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''expand expressions into changelog and summaries
+
+This extension allows the use of a special syntax in summaries, which
+will be automatically expanded into links or any other arbitrary
+expression, much like InterWiki does.
+
+A few example patterns (link to bug tracking, etc.) that may be used
+in your hgrc::
+
+ [interhg]
+ issues = s!issue(\\d+)!<a href="http://bts/issue\\1">issue\\1</a>!
+ bugzilla = s!((?:bug|b=|(?=#?\\d{4,}))(?:\\s*#?)(\\d+))!<a..=\\2">\\1</a>!i
+ boldify = s!(^|\\s)#(\\d+)\\b! <b>#\\2</b>!
+'''
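+
+# Illustration of a rule in action (hypothetical input): with the
+# "issues" pattern above, interhg_escape() rewrites the escaped summary
+# "fixes issue123" into
+# "fixes <a href="http://bts/issue123">issue123</a>".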
+
+import re
+from mercurial.hgweb import hgweb_mod
+from mercurial import templatefilters, extensions
+from mercurial.i18n import _
+
+orig_escape = templatefilters.filters["escape"]
+
+interhg_table = []
+
+def interhg_escape(x):
+ escstr = orig_escape(x)
+ for regexp, format in interhg_table:
+ escstr = regexp.sub(format, escstr)
+ return escstr
+
+templatefilters.filters["escape"] = interhg_escape
+
+def interhg_refresh(orig, self):
+ interhg_table[:] = []
+ for key, pattern in self.repo.ui.configitems('interhg'):
+ # grab the delimiter from the character after the "s"
+ unesc = pattern[1]
+ delim = re.escape(unesc)
+
+ # identify portions of the pattern, taking care to avoid escaped
+ # delimiters. the replace format and flags are optional, but delimiters
+ # are required.
+ match = re.match(r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
+ % (delim, delim, delim), pattern)
+ if not match:
+ self.repo.ui.warn(_("interhg: invalid pattern for %s: %s\n")
+ % (key, pattern))
+ continue
+
+ # we need to unescape the delimiter for regexp and format
+ delim_re = re.compile(r'(?<!\\)\\%s' % delim)
+ regexp = delim_re.sub(unesc, match.group(1))
+ format = delim_re.sub(unesc, match.group(2))
+
+ # the pattern allows for 6 regexp flags, so set them if necessary
+ flagin = match.group(3)
+ flags = 0
+ if flagin:
+ for flag in flagin.upper():
+ flags |= re.__dict__[flag]
+
+ try:
+ regexp = re.compile(regexp, flags)
+ interhg_table.append((regexp, format))
+ except re.error:
+ self.repo.ui.warn(_("interhg: invalid regexp for %s: %s\n")
+ % (key, regexp))
+ return orig(self)
+
+extensions.wrapfunction(hgweb_mod.hgweb, 'refresh', interhg_refresh)
diff --git a/sys/src/cmd/hg/hgext/keyword.py b/sys/src/cmd/hg/hgext/keyword.py
new file mode 100644
index 000000000..b331389cf
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/keyword.py
@@ -0,0 +1,555 @@
+# keyword.py - $Keyword$ expansion for Mercurial
+#
+# Copyright 2007-2009 Christian Ebert <blacktrash@gmx.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+# $Id$
+#
+# Keyword expansion hack against the grain of a DSCM
+#
+# There are many good reasons why this is not needed in a distributed
+# SCM, still it may be useful in very small projects based on single
+# files (like LaTeX packages), that are mostly addressed to an
+# audience not running a version control system.
+#
+# For in-depth discussion refer to
+# <http://mercurial.selenic.com/wiki/KeywordPlan>.
+#
+# Keyword expansion is based on Mercurial's changeset template mappings.
+#
+# Binary files are not touched.
+#
+# Files to act upon/ignore are specified in the [keyword] section.
+# Customized keyword template mappings in the [keywordmaps] section.
+#
+# Run "hg help keyword" and "hg kwdemo" to get info on configuration.
+
+'''expand keywords in tracked files
+
+This extension expands RCS/CVS-like or self-customized $Keywords$ in
+tracked text files selected by your configuration.
+
+Keywords are only expanded in local repositories and not stored in the
+change history. The mechanism can be regarded as a convenience for the
+current user or for archive distribution.
+
+Configuration is done in the [keyword] and [keywordmaps] sections of
+hgrc files.
+
+Example::
+
+ [keyword]
+ # expand keywords in every python file except those matching "x*"
+ **.py =
+ x* = ignore
+
+NOTE: the more specific your filename patterns are, the less speed you
+lose in huge repositories.
+
+For [keywordmaps] template mapping and expansion demonstration and
+control run "hg kwdemo". See "hg help templates" for a list of
+available templates and filters.
+
+An additional date template filter {date|utcdate} is provided. It
+returns a date like "2006/09/18 15:13:13".
+
+The default template mappings (view with "hg kwdemo -d") can be
+replaced with customized keywords and templates. Again, run "hg
+kwdemo" to control the results of your config changes.
+
+Before changing/disabling active keywords, run "hg kwshrink" to avoid
+the risk of inadvertently storing expanded keywords in the change
+history.
+
+To force expansion after enabling it, or a configuration change, run
+"hg kwexpand".
+
+Also, when committing with the record extension or using mq's qrecord,
+be aware that keywords cannot be updated. Again, run "hg kwexpand" on
+the files in question to update keyword expansions after all changes
+have been checked in.
+
+Expansions spanning more than one line and incremental expansions,
+like CVS' $Log$, are not supported. A keyword template map "Log =
+{desc}" expands to the first line of the changeset description.
+'''
+
+from mercurial import commands, cmdutil, dispatch, filelog, revlog, extensions
+from mercurial import patch, localrepo, templater, templatefilters, util, match
+from mercurial.hgweb import webcommands
+from mercurial.lock import release
+from mercurial.node import nullid
+from mercurial.i18n import _
+import re, shutil, tempfile
+
+commands.optionalrepo += ' kwdemo'
+
+# hg commands that do not act on keywords
+nokwcommands = ('add addremove annotate bundle copy export grep incoming init'
+ ' log outgoing push rename rollback tip verify'
+ ' convert email glog')
+
+# hg commands that trigger expansion only when writing to working dir,
+# not when reading filelog, and unexpand when reading from working dir
+restricted = 'merge record resolve qfold qimport qnew qpush qrefresh qrecord'
+
+# provide cvs-like UTC date filter
+utcdate = lambda x: util.datestr(x, '%Y/%m/%d %H:%M:%S')
+
+# make keyword tools accessible
+kwtools = {'templater': None, 'hgcmd': '', 'inc': [], 'exc': ['.hg*']}
+
+
+class kwtemplater(object):
+ '''
+ Sets up keyword templates, corresponding keyword regex, and
+ provides keyword substitution functions.
+ '''
+ templates = {
+ 'Revision': '{node|short}',
+ 'Author': '{author|user}',
+ 'Date': '{date|utcdate}',
+ 'RCSFile': '{file|basename},v',
+ 'Source': '{root}/{file},v',
+ 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
+ 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
+ }
+
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.repo = repo
+ self.match = match.match(repo.root, '', [],
+ kwtools['inc'], kwtools['exc'])
+ self.restrict = kwtools['hgcmd'] in restricted.split()
+
+ kwmaps = self.ui.configitems('keywordmaps')
+ if kwmaps: # override default templates
+ self.templates = dict((k, templater.parsestring(v, False))
+ for k, v in kwmaps)
+ escaped = map(re.escape, self.templates.keys())
+ kwpat = r'\$(%s)(: [^$\n\r]*? )??\$' % '|'.join(escaped)
+ self.re_kw = re.compile(kwpat)
+
+ templatefilters.filters['utcdate'] = utcdate
+ self.ct = cmdutil.changeset_templater(self.ui, self.repo,
+ False, None, '', False)
+
+ def substitute(self, data, path, ctx, subfunc):
+ '''Replaces keywords in data with expanded template.'''
+ def kwsub(mobj):
+ kw = mobj.group(1)
+ self.ct.use_template(self.templates[kw])
+ self.ui.pushbuffer()
+ self.ct.show(ctx, root=self.repo.root, file=path)
+ ekw = templatefilters.firstline(self.ui.popbuffer())
+ return '$%s: %s $' % (kw, ekw)
+ return subfunc(kwsub, data)
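+
+ # For example (hypothetical file and changeset), with the default
+ # 'Date' template above, substitute() rewrites '$Date$' in data to
+ # something like '$Date: 2006/09/18 15:13:13 $'.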
+
+ def expand(self, path, node, data):
+ '''Returns data with keywords expanded.'''
+ if not self.restrict and self.match(path) and not util.binary(data):
+ ctx = self.repo.filectx(path, fileid=node).changectx()
+ return self.substitute(data, path, ctx, self.re_kw.sub)
+ return data
+
+ def iskwfile(self, path, flagfunc):
+ '''Returns true if path matches [keyword] pattern
+ and is not a symbolic link.
+ Caveat: localrepository._link fails on Windows.'''
+ return self.match(path) and not 'l' in flagfunc(path)
+
+ def overwrite(self, node, expand, files):
+ '''Overwrites selected files expanding/shrinking keywords.'''
+ ctx = self.repo[node]
+ mf = ctx.manifest()
+ if node is not None: # commit
+ files = [f for f in ctx.files() if f in mf]
+ notify = self.ui.debug
+ else: # kwexpand/kwshrink
+ notify = self.ui.note
+ candidates = [f for f in files if self.iskwfile(f, ctx.flags)]
+ if candidates:
+ self.restrict = True # do not expand when reading
+ msg = (expand and _('overwriting %s expanding keywords\n')
+ or _('overwriting %s shrinking keywords\n'))
+ for f in candidates:
+ fp = self.repo.file(f)
+ data = fp.read(mf[f])
+ if util.binary(data):
+ continue
+ if expand:
+ if node is None:
+ ctx = self.repo.filectx(f, fileid=mf[f]).changectx()
+ data, found = self.substitute(data, f, ctx,
+ self.re_kw.subn)
+ else:
+ found = self.re_kw.search(data)
+ if found:
+ notify(msg % f)
+ self.repo.wwrite(f, data, mf.flags(f))
+ if node is None:
+ self.repo.dirstate.normal(f)
+ self.restrict = False
+
+ def shrinktext(self, text):
+ '''Unconditionally removes all keyword substitutions from text.'''
+ return self.re_kw.sub(r'$\1$', text)
+
+ def shrink(self, fname, text):
+ '''Returns text with all keyword substitutions removed.'''
+ if self.match(fname) and not util.binary(text):
+ return self.shrinktext(text)
+ return text
+
+ def shrinklines(self, fname, lines):
+ '''Returns lines with keyword substitutions removed.'''
+ if self.match(fname):
+ text = ''.join(lines)
+ if not util.binary(text):
+ return self.shrinktext(text).splitlines(True)
+ return lines
+
+ def wread(self, fname, data):
+ '''If in restricted mode returns data read from wdir with
+ keyword substitutions removed.'''
+ return self.restrict and self.shrink(fname, data) or data
+
+class kwfilelog(filelog.filelog):
+ '''
+ Subclass of filelog to hook into its read, add, cmp methods.
+ Keywords are "stored" unexpanded, and processed on reading.
+ '''
+ def __init__(self, opener, kwt, path):
+ super(kwfilelog, self).__init__(opener, path)
+ self.kwt = kwt
+ self.path = path
+
+ def read(self, node):
+ '''Expands keywords when reading filelog.'''
+ data = super(kwfilelog, self).read(node)
+ return self.kwt.expand(self.path, node, data)
+
+ def add(self, text, meta, tr, link, p1=None, p2=None):
+ '''Removes keyword substitutions when adding to filelog.'''
+ text = self.kwt.shrink(self.path, text)
+ return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
+
+ def cmp(self, node, text):
+ '''Removes keyword substitutions for comparison.'''
+ text = self.kwt.shrink(self.path, text)
+ if self.renamed(node):
+ t2 = super(kwfilelog, self).read(node)
+ return t2 != text
+ return revlog.revlog.cmp(self, node, text)
+
+def _status(ui, repo, kwt, unknown, *pats, **opts):
+ '''Bails out if [keyword] configuration is not active.
+ Returns status of working directory.'''
+ if kwt:
+ match = cmdutil.match(repo, pats, opts)
+ return repo.status(match=match, unknown=unknown, clean=True)
+ if ui.configitems('keyword'):
+ raise util.Abort(_('[keyword] patterns cannot match'))
+ raise util.Abort(_('no [keyword] patterns configured'))
+
+def _kwfwrite(ui, repo, expand, *pats, **opts):
+ '''Selects files and passes them to kwtemplater.overwrite.'''
+ if repo.dirstate.parents()[1] != nullid:
+ raise util.Abort(_('outstanding uncommitted merge'))
+ kwt = kwtools['templater']
+ status = _status(ui, repo, kwt, False, *pats, **opts)
+ modified, added, removed, deleted = status[:4]
+ if modified or added or removed or deleted:
+ raise util.Abort(_('outstanding uncommitted changes'))
+ wlock = lock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ kwt.overwrite(None, expand, status[6])
+ finally:
+ release(lock, wlock)
+
+def demo(ui, repo, *args, **opts):
+ '''print [keywordmaps] configuration and an expansion example
+
+ Show current, custom, or default keyword template maps and their
+ expansions.
+
+ Extend the current configuration by specifying maps as arguments
+ and using -f/--rcfile to source an external hgrc file.
+
+ Use -d/--default to disable current configuration.
+
+ See "hg help templates" for information on templates and filters.
+ '''
+ def demoitems(section, items):
+ ui.write('[%s]\n' % section)
+ for k, v in items:
+ ui.write('%s = %s\n' % (k, v))
+
+ msg = 'hg keyword config and expansion example'
+ fn = 'demo.txt'
+ branchname = 'demobranch'
+ tmpdir = tempfile.mkdtemp('', 'kwdemo.')
+ ui.note(_('creating temporary repository at %s\n') % tmpdir)
+ repo = localrepo.localrepository(ui, tmpdir, True)
+ ui.setconfig('keyword', fn, '')
+
+ uikwmaps = ui.configitems('keywordmaps')
+ if args or opts.get('rcfile'):
+ ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
+ if uikwmaps:
+ ui.status(_('\textending current template maps\n'))
+ if opts.get('default') or not uikwmaps:
+ ui.status(_('\toverriding default template maps\n'))
+ if opts.get('rcfile'):
+ ui.readconfig(opts.get('rcfile'))
+ if args:
+ # simulate hgrc parsing
+ rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
+ fp = repo.opener('hgrc', 'w')
+ fp.writelines(rcmaps)
+ fp.close()
+ ui.readconfig(repo.join('hgrc'))
+ kwmaps = dict(ui.configitems('keywordmaps'))
+ elif opts.get('default'):
+ ui.status(_('\n\tconfiguration using default keyword template maps\n'))
+ kwmaps = kwtemplater.templates
+ if uikwmaps:
+ ui.status(_('\tdisabling current template maps\n'))
+ for k, v in kwmaps.iteritems():
+ ui.setconfig('keywordmaps', k, v)
+ else:
+ ui.status(_('\n\tconfiguration using current keyword template maps\n'))
+ kwmaps = dict(uikwmaps) or kwtemplater.templates
+
+ uisetup(ui)
+ reposetup(ui, repo)
+ for k, v in ui.configitems('extensions'):
+ if k.endswith('keyword'):
+ extension = '%s = %s' % (k, v)
+ break
+ ui.write('[extensions]\n%s\n' % extension)
+ demoitems('keyword', ui.configitems('keyword'))
+ demoitems('keywordmaps', kwmaps.iteritems())
+ keywords = '$' + '$\n$'.join(kwmaps.keys()) + '$\n'
+ repo.wopener(fn, 'w').write(keywords)
+ repo.add([fn])
+ path = repo.wjoin(fn)
+ ui.note(_('\nkeywords written to %s:\n') % path)
+ ui.note(keywords)
+ ui.note('\nhg -R "%s" branch "%s"\n' % (tmpdir, branchname))
+ # silence branch command if not verbose
+ quiet = ui.quiet
+ ui.quiet = not ui.verbose
+ commands.branch(ui, repo, branchname)
+ ui.quiet = quiet
+ for name, cmd in ui.configitems('hooks'):
+ if name.split('.', 1)[0].find('commit') > -1:
+ repo.ui.setconfig('hooks', name, '')
+ ui.note(_('unhooked all commit hooks\n'))
+ ui.note('hg -R "%s" ci -m "%s"\n' % (tmpdir, msg))
+ repo.commit(text=msg)
+ ui.status(_('\n\tkeywords expanded\n'))
+ ui.write(repo.wread(fn))
+ ui.debug(_('\nremoving temporary repository %s\n') % tmpdir)
+ shutil.rmtree(tmpdir, ignore_errors=True)
+
+def expand(ui, repo, *pats, **opts):
+ '''expand keywords in the working directory
+
+ Run after (re)enabling keyword expansion.
+
+ kwexpand refuses to run if given files contain local changes.
+ '''
+ # 3rd argument sets expansion to True
+ _kwfwrite(ui, repo, True, *pats, **opts)
+
+def files(ui, repo, *pats, **opts):
+ '''show files configured for keyword expansion
+
+ List which files in the working directory are matched by the
+ [keyword] configuration patterns.
+
+ Useful to prevent inadvertent keyword expansion and to speed up
+ execution by including only files that are actual candidates for
+ expansion.
+
+ See "hg help keyword" on how to construct patterns both for
+ inclusion and exclusion of files.
+
+ Use -u/--untracked to list untracked files as well.
+
+ With -a/--all and -v/--verbose the codes used to show the status
+ of files are::
+
+ K = keyword expansion candidate
+ k = keyword expansion candidate (untracked)
+ I = ignored
+ i = ignored (untracked)
+ '''
+ kwt = kwtools['templater']
+ status = _status(ui, repo, kwt, opts.get('untracked'), *pats, **opts)
+ modified, added, removed, deleted, unknown, ignored, clean = status
+ files = sorted(modified + added + clean)
+ wctx = repo[None]
+ kwfiles = [f for f in files if kwt.iskwfile(f, wctx.flags)]
+ kwuntracked = [f for f in unknown if kwt.iskwfile(f, wctx.flags)]
+ cwd = pats and repo.getcwd() or ''
+ kwfstats = (not opts.get('ignore') and
+ (('K', kwfiles), ('k', kwuntracked),) or ())
+ if opts.get('all') or opts.get('ignore'):
+ kwfstats += (('I', [f for f in files if f not in kwfiles]),
+ ('i', [f for f in unknown if f not in kwuntracked]),)
+ for char, filenames in kwfstats:
+ fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
+ for f in filenames:
+ ui.write(fmt % repo.pathto(f, cwd))
+
+def shrink(ui, repo, *pats, **opts):
+ '''revert expanded keywords in the working directory
+
+ Run before changing/disabling active keywords or if you experience
+ problems with "hg import" or "hg merge".
+
+ kwshrink refuses to run if given files contain local changes.
+ '''
+ # 3rd argument sets expansion to False
+ _kwfwrite(ui, repo, False, *pats, **opts)
+
+
+def uisetup(ui):
+ '''Collects [keyword] config in kwtools.
+ Monkeypatches dispatch._parse if needed.'''
+
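+    # in the [keyword] section, "pattern = ignore" excludes matching files
+    # from expansion; any other value marks the pattern for inclusion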
+ for pat, opt in ui.configitems('keyword'):
+ if opt != 'ignore':
+ kwtools['inc'].append(pat)
+ else:
+ kwtools['exc'].append(pat)
+
+ if kwtools['inc']:
+ def kwdispatch_parse(orig, ui, args):
+ '''Monkeypatch dispatch._parse to obtain running hg command.'''
+ cmd, func, args, options, cmdoptions = orig(ui, args)
+ kwtools['hgcmd'] = cmd
+ return cmd, func, args, options, cmdoptions
+
+ extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
+
+def reposetup(ui, repo):
+ '''Sets up repo as kwrepo for keyword substitution.
+ Overrides file method to return kwfilelog instead of filelog
+ if file matches user configuration.
+ Wraps commit to overwrite configured files with updated
+ keyword substitutions.
+ Monkeypatches patch and webcommands.'''
+
+ try:
+ if (not repo.local() or not kwtools['inc']
+ or kwtools['hgcmd'] in nokwcommands.split()
+ or '.hg' in util.splitpath(repo.root)
+ or repo._url.startswith('bundle:')):
+ return
+ except AttributeError:
+ pass
+
+ kwtools['templater'] = kwt = kwtemplater(ui, repo)
+
+ class kwrepo(repo.__class__):
+ def file(self, f):
+ if f[0] == '/':
+ f = f[1:]
+ return kwfilelog(self.sopener, kwt, f)
+
+ def wread(self, filename):
+ data = super(kwrepo, self).wread(filename)
+ return kwt.wread(filename, data)
+
+ def commit(self, *args, **opts):
+ # use custom commitctx for user commands
+ # other extensions can still wrap repo.commitctx directly
+ self.commitctx = self.kwcommitctx
+ try:
+ return super(kwrepo, self).commit(*args, **opts)
+ finally:
+ del self.commitctx
+
+ def kwcommitctx(self, ctx, error=False):
+ wlock = lock = None
+ try:
+ wlock = self.wlock()
+ lock = self.lock()
+ # store and postpone commit hooks
+ commithooks = {}
+ for name, cmd in ui.configitems('hooks'):
+ if name.split('.', 1)[0] == 'commit':
+ commithooks[name] = cmd
+ ui.setconfig('hooks', name, None)
+ if commithooks:
+ # store parents for commit hooks
+ p1, p2 = ctx.p1(), ctx.p2()
+ xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
+
+ n = super(kwrepo, self).commitctx(ctx, error)
+
+ kwt.overwrite(n, True, None)
+ if commithooks:
+ for name, cmd in commithooks.iteritems():
+ ui.setconfig('hooks', name, cmd)
+ self.hook('commit', node=n, parent1=xp1, parent2=xp2)
+ return n
+ finally:
+ release(lock, wlock)
+
+ # monkeypatches
+ def kwpatchfile_init(orig, self, ui, fname, opener,
+ missing=False, eol=None):
+ '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
+ rejects or conflicts due to expanded keywords in working dir.'''
+ orig(self, ui, fname, opener, missing, eol)
+ # shrink keywords read from working dir
+ self.lines = kwt.shrinklines(self.fname, self.lines)
+
+ def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
+ opts=None):
+ '''Monkeypatch patch.diff to avoid expansion except when
+ comparing against working dir.'''
+ if node2 is not None:
+ kwt.match = util.never
+ elif node1 is not None and node1 != repo['.'].node():
+ kwt.restrict = True
+ return orig(repo, node1, node2, match, changes, opts)
+
+ def kwweb_skip(orig, web, req, tmpl):
+ '''Wraps webcommands.x turning off keyword expansion.'''
+ kwt.match = util.never
+ return orig(web, req, tmpl)
+
+ repo.__class__ = kwrepo
+
+ extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
+ extensions.wrapfunction(patch, 'diff', kw_diff)
+ for c in 'annotate changeset rev filediff diff'.split():
+ extensions.wrapfunction(webcommands, c, kwweb_skip)
+
+cmdtable = {
+ 'kwdemo':
+ (demo,
+ [('d', 'default', None, _('show default keyword template maps')),
+ ('f', 'rcfile', '', _('read maps from rcfile'))],
+ _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')),
+ 'kwexpand': (expand, commands.walkopts,
+ _('hg kwexpand [OPTION]... [FILE]...')),
+ 'kwfiles':
+ (files,
+ [('a', 'all', None, _('show keyword status flags of all files')),
+ ('i', 'ignore', None, _('show files excluded from expansion')),
+ ('u', 'untracked', None, _('additionally show untracked files')),
+ ] + commands.walkopts,
+ _('hg kwfiles [OPTION]... [FILE]...')),
+ 'kwshrink': (shrink, commands.walkopts,
+ _('hg kwshrink [OPTION]... [FILE]...')),
+}
diff --git a/sys/src/cmd/hg/hgext/mq.py b/sys/src/cmd/hg/hgext/mq.py
new file mode 100644
index 000000000..a2be932c4
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/mq.py
@@ -0,0 +1,2653 @@
+# mq.py - patch queues for mercurial
+#
+# Copyright 2005, 2006 Chris Mason <mason@suse.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''manage a stack of patches
+
+This extension lets you work with a stack of patches in a Mercurial
+repository. It manages two stacks of patches - all known patches, and
+applied patches (subset of known patches).
+
+Known patches are represented as patch files in the .hg/patches
+directory. Applied patches are both patch files and changesets.
+
+Common tasks (use "hg help command" for more details)::
+
+ prepare repository to work with patches qinit
+ create new patch qnew
+ import existing patch qimport
+
+ print patch series qseries
+ print applied patches qapplied
+ print name of top applied patch qtop
+
+ add known patch to applied stack qpush
+ remove patch from applied stack qpop
+ refresh contents of top applied patch qrefresh
+'''
+
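+# A typical session, sketched from the task table above (the patch name
+# below is hypothetical):
+#
+#   hg qinit          # prepare the repository to work with patches
+#   hg qnew fix.patch # create a new patch on top of the applied stack
+#   hg qrefresh       # record working directory changes in the top patch
+#   hg qpop -a        # unapply all patches again
+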
+from mercurial.i18n import _
+from mercurial.node import bin, hex, short, nullid, nullrev
+from mercurial.lock import release
+from mercurial import commands, cmdutil, hg, patch, util
+from mercurial import repair, extensions, url, error
+import os, sys, re, errno
+
+commands.norepo += " qclone"
+
+# Patch names look like unix file names.
+# They must be joinable with the queue directory to produce the patch path.
+normname = util.normpath
+
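+# Each line of the status file is "<changeset hex>:<patch name>";
+# statusentry round-trips that format (parsed in __init__, emitted by __str__).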
+class statusentry(object):
+ def __init__(self, rev, name=None):
+ if not name:
+ fields = rev.split(':', 1)
+ if len(fields) == 2:
+ self.rev, self.name = fields
+ else:
+ self.rev, self.name = None, None
+ else:
+ self.rev, self.name = rev, name
+
+ def __str__(self):
+ return self.rev + ':' + self.name
+
+class patchheader(object):
+ def __init__(self, pf):
+ def eatdiff(lines):
+ while lines:
+ l = lines[-1]
+ if (l.startswith("diff -") or
+ l.startswith("Index:") or
+ l.startswith("===========")):
+ del lines[-1]
+ else:
+ break
+ def eatempty(lines):
+ while lines:
+ l = lines[-1]
+ if re.match('\s*$', l):
+ del lines[-1]
+ else:
+ break
+
+ message = []
+ comments = []
+ user = None
+ date = None
+ format = None
+ subject = None
+ diffstart = 0
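+        # diffstart: 0 = no diff seen yet, 1 = a "--- " header was seen,
+        # 2 = diff confirmed (git header, or "+++ " following "--- ")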
+
+ for line in file(pf):
+ line = line.rstrip()
+ if line.startswith('diff --git'):
+ diffstart = 2
+ break
+ if diffstart:
+ if line.startswith('+++ '):
+ diffstart = 2
+ break
+ if line.startswith("--- "):
+ diffstart = 1
+ continue
+ elif format == "hgpatch":
+ # parse values when importing the result of an hg export
+ if line.startswith("# User "):
+ user = line[7:]
+ elif line.startswith("# Date "):
+ date = line[7:]
+ elif not line.startswith("# ") and line:
+ message.append(line)
+ format = None
+ elif line == '# HG changeset patch':
+ message = []
+ format = "hgpatch"
+ elif (format != "tagdone" and (line.startswith("Subject: ") or
+ line.startswith("subject: "))):
+ subject = line[9:]
+ format = "tag"
+ elif (format != "tagdone" and (line.startswith("From: ") or
+ line.startswith("from: "))):
+ user = line[6:]
+ format = "tag"
+ elif format == "tag" and line == "":
+                # when looking for tags (subject:, from:, etc.), they
+                # end once you find a blank line in the source
+ format = "tagdone"
+ elif message or line:
+ message.append(line)
+ comments.append(line)
+
+ eatdiff(message)
+ eatdiff(comments)
+ eatempty(message)
+ eatempty(comments)
+
+ # make sure message isn't empty
+ if format and format.startswith("tag") and subject:
+ message.insert(0, "")
+ message.insert(0, subject)
+
+ self.message = message
+ self.comments = comments
+ self.user = user
+ self.date = date
+ self.haspatch = diffstart > 1
+
+ def setuser(self, user):
+ if not self.updateheader(['From: ', '# User '], user):
+ try:
+ patchheaderat = self.comments.index('# HG changeset patch')
+ self.comments.insert(patchheaderat + 1, '# User ' + user)
+ except ValueError:
+ if self._hasheader(['Date: ']):
+ self.comments = ['From: ' + user] + self.comments
+ else:
+ tmp = ['# HG changeset patch', '# User ' + user, '']
+ self.comments = tmp + self.comments
+ self.user = user
+
+ def setdate(self, date):
+ if not self.updateheader(['Date: ', '# Date '], date):
+ try:
+ patchheaderat = self.comments.index('# HG changeset patch')
+ self.comments.insert(patchheaderat + 1, '# Date ' + date)
+ except ValueError:
+ if self._hasheader(['From: ']):
+ self.comments = ['Date: ' + date] + self.comments
+ else:
+ tmp = ['# HG changeset patch', '# Date ' + date, '']
+ self.comments = tmp + self.comments
+ self.date = date
+
+ def setmessage(self, message):
+ if self.comments:
+ self._delmsg()
+ self.message = [message]
+ self.comments += self.message
+
+ def updateheader(self, prefixes, new):
+ '''Update all references to a field in the patch header.
+ Return whether the field is present.'''
+ res = False
+ for prefix in prefixes:
+ for i in xrange(len(self.comments)):
+ if self.comments[i].startswith(prefix):
+ self.comments[i] = prefix + new
+ res = True
+ break
+ return res
+
+ def _hasheader(self, prefixes):
+ '''Check if a header starts with any of the given prefixes.'''
+ for prefix in prefixes:
+ for comment in self.comments:
+ if comment.startswith(prefix):
+ return True
+ return False
+
+ def __str__(self):
+ if not self.comments:
+ return ''
+ return '\n'.join(self.comments) + '\n\n'
+
+ def _delmsg(self):
+        '''Remove the existing message, keeping the rest of the comments
+        fields. If the comments contain a 'subject: ' line, the message
+        is assumed to begin with that field followed by a blank line.'''
+ if self.message:
+ subj = 'subject: ' + self.message[0].lower()
+ for i in xrange(len(self.comments)):
+ if subj == self.comments[i].lower():
+ del self.comments[i]
+ self.message = self.message[2:]
+ break
+ ci = 0
+ for mi in self.message:
+ while mi != self.comments[ci]:
+ ci += 1
+ del self.comments[ci]
+
+class queue(object):
+ def __init__(self, ui, path, patchdir=None):
+ self.basepath = path
+ self.path = patchdir or os.path.join(path, "patches")
+ self.opener = util.opener(self.path)
+ self.ui = ui
+ self.applied_dirty = 0
+ self.series_dirty = 0
+ self.series_path = "series"
+ self.status_path = "status"
+ self.guards_path = "guards"
+ self.active_guards = None
+ self.guards_dirty = False
+ self._diffopts = None
+
+ @util.propertycache
+ def applied(self):
+ if os.path.exists(self.join(self.status_path)):
+ lines = self.opener(self.status_path).read().splitlines()
+ return [statusentry(l) for l in lines]
+ return []
+
+ @util.propertycache
+ def full_series(self):
+ if os.path.exists(self.join(self.series_path)):
+ return self.opener(self.series_path).read().splitlines()
+ return []
+
+ @util.propertycache
+ def series(self):
+ self.parse_series()
+ return self.series
+
+ @util.propertycache
+ def series_guards(self):
+ self.parse_series()
+ return self.series_guards
+
+ def invalidate(self):
+ for a in 'applied full_series series series_guards'.split():
+ if a in self.__dict__:
+ delattr(self, a)
+ self.applied_dirty = 0
+ self.series_dirty = 0
+ self.guards_dirty = False
+ self.active_guards = None
+
+ def diffopts(self):
+ if self._diffopts is None:
+ self._diffopts = patch.diffopts(self.ui)
+ return self._diffopts
+
+ def join(self, *p):
+ return os.path.join(self.path, *p)
+
+ def find_series(self, patch):
+ pre = re.compile("(\s*)([^#]+)")
+ index = 0
+ for l in self.full_series:
+ m = pre.match(l)
+ if m:
+ s = m.group(2)
+ s = s.rstrip()
+ if s == patch:
+ return index
+ index += 1
+ return None
+
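+    # guards appear after a patch name in the series file as "#+guard" or
+    # "#-guard"; the regexp captures the signed guard name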
+ guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
+
+ def parse_series(self):
+ self.series = []
+ self.series_guards = []
+ for l in self.full_series:
+ h = l.find('#')
+ if h == -1:
+ patch = l
+ comment = ''
+ elif h == 0:
+ continue
+ else:
+ patch = l[:h]
+ comment = l[h:]
+ patch = patch.strip()
+ if patch:
+ if patch in self.series:
+ raise util.Abort(_('%s appears more than once in %s') %
+ (patch, self.join(self.series_path)))
+ self.series.append(patch)
+ self.series_guards.append(self.guard_re.findall(comment))
+
+ def check_guard(self, guard):
+ if not guard:
+ return _('guard cannot be an empty string')
+ bad_chars = '# \t\r\n\f'
+ first = guard[0]
+ if first in '-+':
+ return (_('guard %r starts with invalid character: %r') %
+ (guard, first))
+ for c in bad_chars:
+ if c in guard:
+ return _('invalid character in guard %r: %r') % (guard, c)
+
+ def set_active(self, guards):
+ for guard in guards:
+ bad = self.check_guard(guard)
+ if bad:
+ raise util.Abort(bad)
+ guards = sorted(set(guards))
+ self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
+ self.active_guards = guards
+ self.guards_dirty = True
+
+ def active(self):
+ if self.active_guards is None:
+ self.active_guards = []
+ try:
+ guards = self.opener(self.guards_path).read().split()
+ except IOError, err:
+ if err.errno != errno.ENOENT: raise
+ guards = []
+ for i, guard in enumerate(guards):
+ bad = self.check_guard(guard)
+ if bad:
+ self.ui.warn('%s:%d: %s\n' %
+ (self.join(self.guards_path), i + 1, bad))
+ else:
+ self.active_guards.append(guard)
+ return self.active_guards
+
+ def set_guards(self, idx, guards):
+ for g in guards:
+ if len(g) < 2:
+ raise util.Abort(_('guard %r too short') % g)
+ if g[0] not in '-+':
+ raise util.Abort(_('guard %r starts with invalid char') % g)
+ bad = self.check_guard(g[1:])
+ if bad:
+ raise util.Abort(bad)
+ drop = self.guard_re.sub('', self.full_series[idx])
+ self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
+ self.parse_series()
+ self.series_dirty = True
+
+ def pushable(self, idx):
+ if isinstance(idx, str):
+ idx = self.series.index(idx)
+ patchguards = self.series_guards[idx]
+ if not patchguards:
+ return True, None
+ guards = self.active()
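+        # a matching negative guard always blocks the patch; if positive
+        # guards exist, at least one of them must be active to allow it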
+ exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
+ if exactneg:
+ return False, exactneg[0]
+ pos = [g for g in patchguards if g[0] == '+']
+ exactpos = [g for g in pos if g[1:] in guards]
+ if pos:
+ if exactpos:
+ return True, exactpos[0]
+ return False, pos
+ return True, ''
+
+ def explain_pushable(self, idx, all_patches=False):
+ write = all_patches and self.ui.write or self.ui.warn
+ if all_patches or self.ui.verbose:
+ if isinstance(idx, str):
+ idx = self.series.index(idx)
+ pushable, why = self.pushable(idx)
+ if all_patches and pushable:
+ if why is None:
+ write(_('allowing %s - no guards in effect\n') %
+ self.series[idx])
+ else:
+ if not why:
+ write(_('allowing %s - no matching negative guards\n') %
+ self.series[idx])
+ else:
+ write(_('allowing %s - guarded by %r\n') %
+ (self.series[idx], why))
+ if not pushable:
+ if why:
+ write(_('skipping %s - guarded by %r\n') %
+ (self.series[idx], why))
+ else:
+ write(_('skipping %s - no matching guards\n') %
+ self.series[idx])
+
+ def save_dirty(self):
+ def write_list(items, path):
+ fp = self.opener(path, 'w')
+ for i in items:
+ fp.write("%s\n" % i)
+ fp.close()
+ if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
+ if self.series_dirty: write_list(self.full_series, self.series_path)
+ if self.guards_dirty: write_list(self.active_guards, self.guards_path)
+
+ def removeundo(self, repo):
+ undo = repo.sjoin('undo')
+ if not os.path.exists(undo):
+ return
+ try:
+ os.unlink(undo)
+ except OSError, inst:
+ self.ui.warn(_('error removing undo: %s\n') % str(inst))
+
+ def printdiff(self, repo, node1, node2=None, files=None,
+ fp=None, changes=None, opts={}):
+ m = cmdutil.match(repo, files, opts)
+ chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
+ write = fp is None and repo.ui.write or fp.write
+ for chunk in chunks:
+ write(chunk)
+
+ def mergeone(self, repo, mergeq, head, patch, rev):
+ # first try just applying the patch
+ (err, n) = self.apply(repo, [ patch ], update_status=False,
+ strict=True, merge=rev)
+
+ if err == 0:
+ return (err, n)
+
+ if n is None:
+ raise util.Abort(_("apply failed for patch %s") % patch)
+
+ self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
+
+ # apply failed, strip away that rev and merge.
+ hg.clean(repo, head)
+ self.strip(repo, n, update=False, backup='strip')
+
+ ctx = repo[rev]
+ ret = hg.merge(repo, rev)
+ if ret:
+ raise util.Abort(_("update returned %d") % ret)
+ n = repo.commit(ctx.description(), ctx.user(), force=True)
+ if n is None:
+ raise util.Abort(_("repo commit failed"))
+ try:
+ ph = patchheader(mergeq.join(patch))
+ except:
+ raise util.Abort(_("unable to read %s") % patch)
+
+ patchf = self.opener(patch, "w")
+ comments = str(ph)
+ if comments:
+ patchf.write(comments)
+ self.printdiff(repo, head, n, fp=patchf)
+ patchf.close()
+ self.removeundo(repo)
+ return (0, n)
+
+ def qparents(self, repo, rev=None):
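+        # with no rev, return what the next patch would apply on top of:
+        # the working directory parent or, during a merge, the top applied
+        # patch; with a rev, prefer whichever parent is an applied patch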
+ if rev is None:
+ (p1, p2) = repo.dirstate.parents()
+ if p2 == nullid:
+ return p1
+ if len(self.applied) == 0:
+ return None
+ return bin(self.applied[-1].rev)
+ pp = repo.changelog.parents(rev)
+ if pp[1] != nullid:
+ arevs = [ x.rev for x in self.applied ]
+ p0 = hex(pp[0])
+ p1 = hex(pp[1])
+ if p0 in arevs:
+ return pp[0]
+ if p1 in arevs:
+ return pp[1]
+ return pp[0]
+
+ def mergepatch(self, repo, mergeq, series):
+ if len(self.applied) == 0:
+ # each of the patches merged in will have two parents. This
+ # can confuse the qrefresh, qdiff, and strip code because it
+ # needs to know which parent is actually in the patch queue.
+ # so, we insert a merge marker with only one parent. This way
+ # the first patch in the queue is never a merge patch
+ #
+ pname = ".hg.patches.merge.marker"
+ n = repo.commit('[mq]: merge marker', force=True)
+ self.removeundo(repo)
+ self.applied.append(statusentry(hex(n), pname))
+ self.applied_dirty = 1
+
+ head = self.qparents(repo)
+
+ for patch in series:
+ patch = mergeq.lookup(patch, strict=True)
+ if not patch:
+ self.ui.warn(_("patch %s does not exist\n") % patch)
+ return (1, None)
+ pushable, reason = self.pushable(patch)
+ if not pushable:
+ self.explain_pushable(patch, all_patches=True)
+ continue
+ info = mergeq.isapplied(patch)
+ if not info:
+ self.ui.warn(_("patch %s is not applied\n") % patch)
+ return (1, None)
+ rev = bin(info[1])
+ (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
+ if head:
+ self.applied.append(statusentry(hex(head), patch))
+ self.applied_dirty = 1
+ if err:
+ return (err, head)
+ self.save_dirty()
+ return (0, head)
+
+ def patch(self, repo, patchfile):
+ '''Apply patchfile to the working directory.
+ patchfile: name of patch file'''
+ files = {}
+ try:
+ fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
+ files=files, eolmode=None)
+ except Exception, inst:
+ self.ui.note(str(inst) + '\n')
+ if not self.ui.verbose:
+ self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
+ return (False, files, False)
+
+ return (True, files, fuzz)
+
+ def apply(self, repo, series, list=False, update_status=True,
+ strict=False, patchdir=None, merge=None, all_files={}):
+ wlock = lock = tr = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ tr = repo.transaction()
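+            # all patches in this batch are applied within one transaction,
+            # so a failed application can be rolled back atomically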
+ try:
+ ret = self._apply(repo, series, list, update_status,
+ strict, patchdir, merge, all_files=all_files)
+ tr.close()
+ self.save_dirty()
+ return ret
+ except:
+ try:
+ tr.abort()
+ finally:
+ repo.invalidate()
+ repo.dirstate.invalidate()
+ raise
+ finally:
+ del tr
+ release(lock, wlock)
+ self.removeundo(repo)
+
+ def _apply(self, repo, series, list=False, update_status=True,
+ strict=False, patchdir=None, merge=None, all_files={}):
+ '''returns (error, hash)
+ error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
+ # TODO unify with commands.py
+ if not patchdir:
+ patchdir = self.path
+ err = 0
+ n = None
+ for patchname in series:
+ pushable, reason = self.pushable(patchname)
+ if not pushable:
+ self.explain_pushable(patchname, all_patches=True)
+ continue
+ self.ui.status(_("applying %s\n") % patchname)
+ pf = os.path.join(patchdir, patchname)
+
+ try:
+ ph = patchheader(self.join(patchname))
+ except:
+ self.ui.warn(_("unable to read %s\n") % patchname)
+ err = 1
+ break
+
+ message = ph.message
+ if not message:
+ message = _("imported patch %s\n") % patchname
+ else:
+ if list:
+ message.append(_("\nimported patch %s") % patchname)
+ message = '\n'.join(message)
+
+ if ph.haspatch:
+ (patcherr, files, fuzz) = self.patch(repo, pf)
+ all_files.update(files)
+ patcherr = not patcherr
+ else:
+ self.ui.warn(_("patch %s is empty\n") % patchname)
+ patcherr, files, fuzz = 0, [], 0
+
+ if merge and files:
+ # Mark as removed/merged and update dirstate parent info
+ removed = []
+ merged = []
+ for f in files:
+ if os.path.exists(repo.wjoin(f)):
+ merged.append(f)
+ else:
+ removed.append(f)
+ for f in removed:
+ repo.dirstate.remove(f)
+ for f in merged:
+ repo.dirstate.merge(f)
+ p1, p2 = repo.dirstate.parents()
+ repo.dirstate.setparents(p1, merge)
+
+ files = patch.updatedir(self.ui, repo, files)
+ match = cmdutil.matchfiles(repo, files or [])
+ n = repo.commit(message, ph.user, ph.date, match=match, force=True)
+
+ if n is None:
+ raise util.Abort(_("repo commit failed"))
+
+ if update_status:
+ self.applied.append(statusentry(hex(n), patchname))
+
+ if patcherr:
+ self.ui.warn(_("patch failed, rejects left in working dir\n"))
+ err = 2
+ break
+
+ if fuzz and strict:
+ self.ui.warn(_("fuzz found when applying patch, stopping\n"))
+ err = 3
+ break
+ return (err, n)
+
+ def _cleanup(self, patches, numrevs, keep=False):
+ if not keep:
+ r = self.qrepo()
+ if r:
+ r.remove(patches, True)
+ else:
+ for p in patches:
+ os.unlink(self.join(p))
+
+ if numrevs:
+ del self.applied[:numrevs]
+ self.applied_dirty = 1
+
+ for i in sorted([self.find_series(p) for p in patches], reverse=True):
+ del self.full_series[i]
+ self.parse_series()
+ self.series_dirty = 1
+
+ def _revpatches(self, repo, revs):
+ firstrev = repo[self.applied[0].rev].rev()
+ patches = []
+ for i, rev in enumerate(revs):
+
+ if rev < firstrev:
+ raise util.Abort(_('revision %d is not managed') % rev)
+
+ ctx = repo[rev]
+ base = bin(self.applied[i].rev)
+ if ctx.node() != base:
+ msg = _('cannot delete revision %d above applied patches')
+ raise util.Abort(msg % rev)
+
+ patch = self.applied[i].name
+ for fmt in ('[mq]: %s', 'imported patch %s'):
+ if ctx.description() == fmt % patch:
+ msg = _('patch %s finalized without changeset message\n')
+ repo.ui.status(msg % patch)
+ break
+
+ patches.append(patch)
+ return patches
+
+ def finish(self, repo, revs):
+ patches = self._revpatches(repo, sorted(revs))
+ self._cleanup(patches, len(patches))
+
+ def delete(self, repo, patches, opts):
+ if not patches and not opts.get('rev'):
+ raise util.Abort(_('qdelete requires at least one revision or '
+ 'patch name'))
+
+ realpatches = []
+ for patch in patches:
+ patch = self.lookup(patch, strict=True)
+ info = self.isapplied(patch)
+ if info:
+ raise util.Abort(_("cannot delete applied patch %s") % patch)
+ if patch not in self.series:
+ raise util.Abort(_("patch %s not in series file") % patch)
+ realpatches.append(patch)
+
+ numrevs = 0
+ if opts.get('rev'):
+ if not self.applied:
+ raise util.Abort(_('no patches applied'))
+ revs = cmdutil.revrange(repo, opts['rev'])
+ if len(revs) > 1 and revs[0] > revs[1]:
+ revs.reverse()
+ revpatches = self._revpatches(repo, revs)
+ realpatches += revpatches
+ numrevs = len(revpatches)
+
+ self._cleanup(realpatches, numrevs, opts.get('keep'))
+
+ def check_toppatch(self, repo):
+ if len(self.applied) > 0:
+ top = bin(self.applied[-1].rev)
+ pp = repo.dirstate.parents()
+ if top not in pp:
+ raise util.Abort(_("working directory revision is not qtip"))
+ return top
+        return None
+
+    def check_localchanges(self, repo, force=False, refresh=True):
+ m, a, r, d = repo.status()[:4]
+ if m or a or r or d:
+ if not force:
+ if refresh:
+ raise util.Abort(_("local changes found, refresh first"))
+ else:
+ raise util.Abort(_("local changes found"))
+ return m, a, r, d
+
+ _reserved = ('series', 'status', 'guards')
+ def check_reserved_name(self, name):
+ if (name in self._reserved or name.startswith('.hg')
+ or name.startswith('.mq')):
+ raise util.Abort(_('"%s" cannot be used as the name of a patch')
+ % name)
+
+ def new(self, repo, patchfn, *pats, **opts):
+ """options:
+ msg: a string or a no-argument function returning a string
+ """
+ msg = opts.get('msg')
+ force = opts.get('force')
+ user = opts.get('user')
+ date = opts.get('date')
+ if date:
+ date = util.parsedate(date)
+ self.check_reserved_name(patchfn)
+ if os.path.exists(self.join(patchfn)):
+ raise util.Abort(_('patch "%s" already exists') % patchfn)
+ if opts.get('include') or opts.get('exclude') or pats:
+ match = cmdutil.match(repo, pats, opts)
+ # detect missing files in pats
+ def badfn(f, msg):
+ raise util.Abort('%s: %s' % (f, msg))
+ match.bad = badfn
+ m, a, r, d = repo.status(match=match)[:4]
+ else:
+ m, a, r, d = self.check_localchanges(repo, force)
+ match = cmdutil.matchfiles(repo, m + a + r)
+ commitfiles = m + a + r
+ self.check_toppatch(repo)
+ insert = self.full_series_end()
+ wlock = repo.wlock()
+ try:
+ # if patch file write fails, abort early
+ p = self.opener(patchfn, "w")
+ try:
+ if date:
+ p.write("# HG changeset patch\n")
+ if user:
+ p.write("# User " + user + "\n")
+ p.write("# Date %d %d\n\n" % date)
+ elif user:
+ p.write("From: " + user + "\n\n")
+
+ if hasattr(msg, '__call__'):
+ msg = msg()
+ commitmsg = msg and msg or ("[mq]: %s" % patchfn)
+ n = repo.commit(commitmsg, user, date, match=match, force=True)
+ if n is None:
+ raise util.Abort(_("repo commit failed"))
+ try:
+ self.full_series[insert:insert] = [patchfn]
+ self.applied.append(statusentry(hex(n), patchfn))
+ self.parse_series()
+ self.series_dirty = 1
+ self.applied_dirty = 1
+ if msg:
+ msg = msg + "\n\n"
+ p.write(msg)
+ if commitfiles:
+ diffopts = self.diffopts()
+ if opts.get('git'): diffopts.git = True
+ parent = self.qparents(repo, n)
+ chunks = patch.diff(repo, node1=parent, node2=n,
+ match=match, opts=diffopts)
+ for chunk in chunks:
+ p.write(chunk)
+ p.close()
+ wlock.release()
+ wlock = None
+ r = self.qrepo()
+ if r: r.add([patchfn])
+ except:
+ repo.rollback()
+ raise
+ except Exception:
+ patchpath = self.join(patchfn)
+ try:
+ os.unlink(patchpath)
+ except:
+ self.ui.warn(_('error unlinking %s\n') % patchpath)
+ raise
+ self.removeundo(repo)
+ finally:
+ release(wlock)
+
+ def strip(self, repo, rev, update=True, backup="all", force=None):
+ wlock = lock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+
+ if update:
+ self.check_localchanges(repo, force=force, refresh=False)
+ urev = self.qparents(repo, rev)
+ hg.clean(repo, urev)
+ repo.dirstate.write()
+
+ self.removeundo(repo)
+ repair.strip(self.ui, repo, rev, backup)
+ # strip may have unbundled a set of backed up revisions after
+ # the actual strip
+ self.removeundo(repo)
+ finally:
+ release(lock, wlock)
+
+ def isapplied(self, patch):
+ """returns (index, rev, patch)"""
+ for i, a in enumerate(self.applied):
+ if a.name == patch:
+ return (i, a.rev, a.name)
+ return None
+
+ # if the exact patch name does not exist, we try a few
+ # variations. If strict is passed, we try only #1
+ #
+ # 1) a number to indicate an offset in the series file
+    # 2) a unique substring of the patch name
+ # 3) patchname[-+]num to indicate an offset in the series file
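+    # e.g. (with hypothetical names) "2" selects the third patch in the
+    # series, "foo" a unique patch name containing "foo", and "foo-2" the
+    # patch two series entries before that match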
+ def lookup(self, patch, strict=False):
+ patch = patch and str(patch)
+
+ def partial_name(s):
+ if s in self.series:
+ return s
+ matches = [x for x in self.series if s in x]
+ if len(matches) > 1:
+ self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
+ for m in matches:
+ self.ui.warn(' %s\n' % m)
+ return None
+ if matches:
+ return matches[0]
+ if len(self.series) > 0 and len(self.applied) > 0:
+ if s == 'qtip':
+ return self.series[self.series_end(True)-1]
+ if s == 'qbase':
+ return self.series[0]
+ return None
+
+ if patch is None:
+ return None
+ if patch in self.series:
+ return patch
+
+ if not os.path.isfile(self.join(patch)):
+ try:
+ sno = int(patch)
+ except(ValueError, OverflowError):
+ pass
+ else:
+ if -len(self.series) <= sno < len(self.series):
+ return self.series[sno]
+
+ if not strict:
+ res = partial_name(patch)
+ if res:
+ return res
+ minus = patch.rfind('-')
+ if minus >= 0:
+ res = partial_name(patch[:minus])
+ if res:
+ i = self.series.index(res)
+ try:
+ off = int(patch[minus+1:] or 1)
+ except(ValueError, OverflowError):
+ pass
+ else:
+ if i - off >= 0:
+ return self.series[i - off]
+ plus = patch.rfind('+')
+ if plus >= 0:
+ res = partial_name(patch[:plus])
+ if res:
+ i = self.series.index(res)
+ try:
+ off = int(patch[plus+1:] or 1)
+ except(ValueError, OverflowError):
+ pass
+ else:
+ if i + off < len(self.series):
+ return self.series[i + off]
+ raise util.Abort(_("patch %s not in series") % patch)
+
+ def push(self, repo, patch=None, force=False, list=False,
+ mergeq=None, all=False):
+ wlock = repo.wlock()
+ try:
+ if repo.dirstate.parents()[0] not in repo.heads():
+ self.ui.status(_("(working directory not at a head)\n"))
+
+ if not self.series:
+ self.ui.warn(_('no patches in series\n'))
+ return 0
+
+ patch = self.lookup(patch)
+            # Suppose our series file is: A B C and the current 'top'
+            # patch is B. qpush C should be performed (moving forward);
+            # qpush B is a NOP (no change); and qpush A is an error
+            # (can't go backwards with qpush).
+ if patch:
+ info = self.isapplied(patch)
+ if info:
+ if info[0] < len(self.applied) - 1:
+ raise util.Abort(
+ _("cannot push to a previous patch: %s") % patch)
+ self.ui.warn(
+ _('qpush: %s is already at the top\n') % patch)
+ return
+ pushable, reason = self.pushable(patch)
+ if not pushable:
+ if reason:
+ reason = _('guarded by %r') % reason
+ else:
+ reason = _('no matching guards')
+ self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
+ return 1
+ elif all:
+ patch = self.series[-1]
+ if self.isapplied(patch):
+ self.ui.warn(_('all patches are currently applied\n'))
+ return 0
+
+ # Following the above example, starting at 'top' of B:
+ # qpush should be performed (pushes C), but a subsequent
+ # qpush without an argument is an error (nothing to
+ # apply). This allows a loop of "...while hg qpush..." to
+ # work as it detects an error when done
+ start = self.series_end()
+ if start == len(self.series):
+ self.ui.warn(_('patch series already fully applied\n'))
+ return 1
+ if not force:
+ self.check_localchanges(repo)
+
+ self.applied_dirty = 1
+ if start > 0:
+ self.check_toppatch(repo)
+ if not patch:
+ patch = self.series[start]
+ end = start + 1
+ else:
+ end = self.series.index(patch, start) + 1
+
+ s = self.series[start:end]
+ all_files = {}
+ try:
+ if mergeq:
+ ret = self.mergepatch(repo, mergeq, s)
+ else:
+ ret = self.apply(repo, s, list, all_files=all_files)
+ except:
+ self.ui.warn(_('cleaning up working directory...'))
+ node = repo.dirstate.parents()[0]
+ hg.revert(repo, node, None)
+ unknown = repo.status(unknown=True)[4]
+ # only remove unknown files that we know we touched or
+ # created while patching
+ for f in unknown:
+ if f in all_files:
+ util.unlink(repo.wjoin(f))
+ self.ui.warn(_('done\n'))
+ raise
+
+ top = self.applied[-1].name
+ if ret[0] and ret[0] > 1:
+ msg = _("errors during apply, please fix and refresh %s\n")
+ self.ui.write(msg % top)
+ else:
+ self.ui.write(_("now at: %s\n") % top)
+ return ret[0]
+
+ finally:
+ wlock.release()
+
+ def pop(self, repo, patch=None, force=False, update=True, all=False):
+ def getfile(f, rev, flags):
+ t = repo.file(f).read(rev)
+ repo.wwrite(f, t, flags)
+
+ wlock = repo.wlock()
+ try:
+ if patch:
+ # index, rev, patch
+ info = self.isapplied(patch)
+ if not info:
+ patch = self.lookup(patch)
+ info = self.isapplied(patch)
+ if not info:
+ raise util.Abort(_("patch %s is not applied") % patch)
+
+ if len(self.applied) == 0:
+ # Allow qpop -a to work repeatedly,
+ # but not qpop without an argument
+ self.ui.warn(_("no patches applied\n"))
+ return not all
+
+ if all:
+ start = 0
+ elif patch:
+ start = info[0] + 1
+ else:
+ start = len(self.applied) - 1
+
+ if start >= len(self.applied):
+ self.ui.warn(_("qpop: %s is already at the top\n") % patch)
+ return
+
+ if not update:
+ parents = repo.dirstate.parents()
+ rr = [ bin(x.rev) for x in self.applied ]
+ for p in parents:
+ if p in rr:
+ self.ui.warn(_("qpop: forcing dirstate update\n"))
+ update = True
+ else:
+ parents = [p.hex() for p in repo[None].parents()]
+ needupdate = False
+ for entry in self.applied[start:]:
+ if entry.rev in parents:
+ needupdate = True
+ break
+ update = needupdate
+
+ if not force and update:
+ self.check_localchanges(repo)
+
+ self.applied_dirty = 1
+ end = len(self.applied)
+ rev = bin(self.applied[start].rev)
+ if update:
+ top = self.check_toppatch(repo)
+
+ try:
+ heads = repo.changelog.heads(rev)
+ except error.LookupError:
+ node = short(rev)
+ raise util.Abort(_('trying to pop unknown node %s') % node)
+
+ if heads != [bin(self.applied[-1].rev)]:
+ raise util.Abort(_("popping would remove a revision not "
+ "managed by this patch queue"))
+
+ # we know there are no local changes, so we can make a simplified
+ # form of hg.update.
+ if update:
+ qp = self.qparents(repo, rev)
+ changes = repo.changelog.read(qp)
+ mmap = repo.manifest.read(changes[0])
+ m, a, r, d = repo.status(qp, top)[:4]
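+                # status is computed from qp to top: modified and removed
+                # files are restored from qp's manifest, while files added
+                # on top of qp are deleted from the working directory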
+ if d:
+ raise util.Abort(_("deletions found between repo revs"))
+ for f in m:
+ getfile(f, mmap[f], mmap.flags(f))
+ for f in r:
+ getfile(f, mmap[f], mmap.flags(f))
+ for f in m + r:
+ repo.dirstate.normal(f)
+ for f in a:
+ try:
+ os.unlink(repo.wjoin(f))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try: os.removedirs(os.path.dirname(repo.wjoin(f)))
+ except: pass
+ repo.dirstate.forget(f)
+ repo.dirstate.setparents(qp, nullid)
+ for patch in reversed(self.applied[start:end]):
+ self.ui.status(_("popping %s\n") % patch.name)
+ del self.applied[start:end]
+ self.strip(repo, rev, update=False, backup='strip')
+ if len(self.applied):
+ self.ui.write(_("now at: %s\n") % self.applied[-1].name)
+ else:
+ self.ui.write(_("patch queue now empty\n"))
+ finally:
+ wlock.release()
+
+ def diff(self, repo, pats, opts):
+ top = self.check_toppatch(repo)
+ if not top:
+ self.ui.write(_("no patches applied\n"))
+ return
+ qp = self.qparents(repo, top)
+ self._diffopts = patch.diffopts(self.ui, opts)
+ self.printdiff(repo, qp, files=pats, opts=opts)
+
+ def refresh(self, repo, pats=None, **opts):
+ if len(self.applied) == 0:
+ self.ui.write(_("no patches applied\n"))
+ return 1
+ msg = opts.get('msg', '').rstrip()
+ newuser = opts.get('user')
+ newdate = opts.get('date')
+ if newdate:
+ newdate = '%d %d' % util.parsedate(newdate)
+ wlock = repo.wlock()
+ try:
+ self.check_toppatch(repo)
+ (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
+ top = bin(top)
+ if repo.changelog.heads(top) != [top]:
+ raise util.Abort(_("cannot refresh a revision with children"))
+ cparents = repo.changelog.parents(top)
+ patchparent = self.qparents(repo, top)
+ ph = patchheader(self.join(patchfn))
+
+ patchf = self.opener(patchfn, 'r')
+
+ # if the patch was a git patch, refresh it as a git patch
+ for line in patchf:
+ if line.startswith('diff --git'):
+ self.diffopts().git = True
+ break
+
+ if msg:
+ ph.setmessage(msg)
+ if newuser:
+ ph.setuser(newuser)
+ if newdate:
+ ph.setdate(newdate)
+
+ # only commit new patch when write is complete
+ patchf = self.opener(patchfn, 'w', atomictemp=True)
+
+ patchf.seek(0)
+ patchf.truncate()
+
+ comments = str(ph)
+ if comments:
+ patchf.write(comments)
+
+ if opts.get('git'):
+ self.diffopts().git = True
+ tip = repo.changelog.tip()
+ if top == tip:
+ # if the top of our patch queue is also the tip, there is an
+ # optimization here. We update the dirstate in place and strip
+ # off the tip commit. Then just commit the current directory
+ # tree. We can also send repo.commit the list of files
+ # changed to speed up the diff
+ #
+ # in short mode, we only diff the files included in the
+ # patch already plus specified files
+ #
+ # this should really read:
+ # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
+ # but we do it backwards to take advantage of manifest/chlog
+ # caching against the next repo.status call
+ #
+ mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
+ changes = repo.changelog.read(tip)
+ man = repo.manifest.read(changes[0])
+ aaa = aa[:]
+ matchfn = cmdutil.match(repo, pats, opts)
+ if opts.get('short'):
+ # if amending a patch, we start with existing
+ # files plus specified files - unfiltered
+ match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
+ # filter with inc/exl options
+ matchfn = cmdutil.match(repo, opts=opts)
+ else:
+ match = cmdutil.matchall(repo)
+ m, a, r, d = repo.status(match=match)[:4]
+
+ # we might end up with files that were added between
+ # tip and the dirstate parent, but then changed in the
+ # local dirstate. in this case, we want them to only
+ # show up in the added section
+ for x in m:
+ if x not in aa:
+ mm.append(x)
+ # we might end up with files added by the local dirstate that
+ # were deleted by the patch. In this case, they should only
+ # show up in the changed section.
+ for x in a:
+ if x in dd:
+ del dd[dd.index(x)]
+ mm.append(x)
+ else:
+ aa.append(x)
+ # make sure any files deleted in the local dirstate
+ # are not in the add or change column of the patch
+ forget = []
+ for x in d + r:
+ if x in aa:
+ del aa[aa.index(x)]
+ forget.append(x)
+ continue
+ elif x in mm:
+ del mm[mm.index(x)]
+ dd.append(x)
+
+ m = list(set(mm))
+ r = list(set(dd))
+ a = list(set(aa))
+ c = [filter(matchfn, l) for l in (m, a, r)]
+ match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
+ chunks = patch.diff(repo, patchparent, match=match,
+ changes=c, opts=self.diffopts())
+ for chunk in chunks:
+ patchf.write(chunk)
+
+ try:
+ if self.diffopts().git:
+ copies = {}
+ for dst in a:
+ src = repo.dirstate.copied(dst)
+ # during qfold, the source file for copies may
+ # be removed. Treat this as a simple add.
+ if src is not None and src in repo.dirstate:
+ copies.setdefault(src, []).append(dst)
+ repo.dirstate.add(dst)
+ # remember the copies between patchparent and tip
+ for dst in aaa:
+ f = repo.file(dst)
+ src = f.renamed(man[dst])
+ if src:
+ copies.setdefault(src[0], []).extend(copies.get(dst, []))
+ if dst in a:
+ copies[src[0]].append(dst)
+ # we can't copy a file created by the patch itself
+ if dst in copies:
+ del copies[dst]
+ for src, dsts in copies.iteritems():
+ for dst in dsts:
+ repo.dirstate.copy(src, dst)
+ else:
+ for dst in a:
+ repo.dirstate.add(dst)
+ # Drop useless copy information
+ for f in list(repo.dirstate.copies()):
+ repo.dirstate.copy(None, f)
+ for f in r:
+ repo.dirstate.remove(f)
+ # if the patch excludes a modified file, mark that
+ # file with mtime=0 so status can see it.
+ mm = []
+ for i in xrange(len(m)-1, -1, -1):
+ if not matchfn(m[i]):
+ mm.append(m[i])
+ del m[i]
+ for f in m:
+ repo.dirstate.normal(f)
+ for f in mm:
+ repo.dirstate.normallookup(f)
+ for f in forget:
+ repo.dirstate.forget(f)
+
+ if not msg:
+ if not ph.message:
+ message = "[mq]: %s\n" % patchfn
+ else:
+ message = "\n".join(ph.message)
+ else:
+ message = msg
+
+ user = ph.user or changes[1]
+
+ # assumes strip can roll itself back if interrupted
+ repo.dirstate.setparents(*cparents)
+ self.applied.pop()
+ self.applied_dirty = 1
+ self.strip(repo, top, update=False,
+ backup='strip')
+ except:
+ repo.dirstate.invalidate()
+ raise
+
+ try:
+ # might be nice to attempt to roll back strip after this
+ patchf.rename()
+ n = repo.commit(message, user, ph.date, match=match,
+ force=True)
+ self.applied.append(statusentry(hex(n), patchfn))
+ except:
+ ctx = repo[cparents[0]]
+ repo.dirstate.rebuild(ctx.node(), ctx.manifest())
+ self.save_dirty()
+ self.ui.warn(_('refresh interrupted while patch was popped! '
+ '(revert --all, qpush to recover)\n'))
+ raise
+ else:
+ self.printdiff(repo, patchparent, fp=patchf)
+ patchf.rename()
+ added = repo.status()[1]
+ for a in added:
+ f = repo.wjoin(a)
+ try:
+ os.unlink(f)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try: os.removedirs(os.path.dirname(f))
+ except: pass
+ # forget the file copies in the dirstate
+ # push should readd the files later on
+ repo.dirstate.forget(a)
+ self.pop(repo, force=True)
+ self.push(repo, force=True)
+ finally:
+ wlock.release()
+ self.removeundo(repo)
+
+ def init(self, repo, create=False):
+ if not create and os.path.isdir(self.path):
+ raise util.Abort(_("patch queue directory already exists"))
+ try:
+ os.mkdir(self.path)
+ except OSError, inst:
+ if inst.errno != errno.EEXIST or not create:
+ raise
+ if create:
+ return self.qrepo(create=True)
+
+ def unapplied(self, repo, patch=None):
+ if patch and patch not in self.series:
+ raise util.Abort(_("patch %s is not in series file") % patch)
+ if not patch:
+ start = self.series_end()
+ else:
+ start = self.series.index(patch) + 1
+ unapplied = []
+ for i in xrange(start, len(self.series)):
+ pushable, reason = self.pushable(i)
+ if pushable:
+ unapplied.append((i, self.series[i]))
+ self.explain_pushable(i)
+ return unapplied
+
+ def qseries(self, repo, missing=None, start=0, length=None, status=None,
+ summary=False):
+ def displayname(pfx, patchname):
+ if summary:
+ ph = patchheader(self.join(patchname))
+ msg = ph.message
+ msg = msg and ': ' + msg[0] or ': '
+ else:
+ msg = ''
+ msg = "%s%s%s" % (pfx, patchname, msg)
+ if self.ui.interactive():
+ msg = util.ellipsis(msg, util.termwidth())
+ self.ui.write(msg + '\n')
+
+ applied = set([p.name for p in self.applied])
+ if length is None:
+ length = len(self.series) - start
+ if not missing:
+ if self.ui.verbose:
+ idxwidth = len(str(start+length - 1))
+ for i in xrange(start, start+length):
+ patch = self.series[i]
+ if patch in applied:
+ stat = 'A'
+ elif self.pushable(i)[0]:
+ stat = 'U'
+ else:
+ stat = 'G'
+ pfx = ''
+ if self.ui.verbose:
+ pfx = '%*d %s ' % (idxwidth, i, stat)
+ elif status and status != stat:
+ continue
+ displayname(pfx, patch)
+ else:
+ msng_list = []
+ for root, dirs, files in os.walk(self.path):
+ d = root[len(self.path) + 1:]
+ for f in files:
+ fl = os.path.join(d, f)
+ if (fl not in self.series and
+ fl not in (self.status_path, self.series_path,
+ self.guards_path)
+ and not fl.startswith('.')):
+ msng_list.append(fl)
+ for x in sorted(msng_list):
+ pfx = self.ui.verbose and ('D ') or ''
+ displayname(pfx, x)
+
+ def issaveline(self, l):
+ if l.name == '.hg.patches.save.line':
+ return True
+
+ def qrepo(self, create=False):
+ if create or os.path.isdir(self.join(".hg")):
+ return hg.repository(self.ui, path=self.path, create=create)
+
+ def restore(self, repo, rev, delete=None, qupdate=None):
+ c = repo.changelog.read(rev)
+ desc = c[4].strip()
+ lines = desc.splitlines()
+ i = 0
+ datastart = None
+ series = []
+ applied = []
+ qpp = None
+ for i, line in enumerate(lines):
+ if line == 'Patch Data:':
+ datastart = i + 1
+ elif line.startswith('Dirstate:'):
+ l = line.rstrip()
+ l = l[10:].split(' ')
+ qpp = [ bin(x) for x in l ]
+            elif datastart is not None:
+ l = line.rstrip()
+ se = statusentry(l)
+ file_ = se.name
+ if se.rev:
+ applied.append(se)
+ else:
+ series.append(file_)
+ if datastart is None:
+ self.ui.warn(_("No saved patch data found\n"))
+ return 1
+ self.ui.warn(_("restoring status: %s\n") % lines[0])
+ self.full_series = series
+ self.applied = applied
+ self.parse_series()
+ self.series_dirty = 1
+ self.applied_dirty = 1
+ heads = repo.changelog.heads()
+ if delete:
+ if rev not in heads:
+ self.ui.warn(_("save entry has children, leaving it alone\n"))
+ else:
+ self.ui.warn(_("removing save entry %s\n") % short(rev))
+ pp = repo.dirstate.parents()
+ if rev in pp:
+ update = True
+ else:
+ update = False
+ self.strip(repo, rev, update=update, backup='strip')
+ if qpp:
+ self.ui.warn(_("saved queue repository parents: %s %s\n") %
+ (short(qpp[0]), short(qpp[1])))
+ if qupdate:
+ self.ui.status(_("queue directory updating\n"))
+ r = self.qrepo()
+ if not r:
+ self.ui.warn(_("Unable to load queue repository\n"))
+ return 1
+ hg.clean(r, qpp[0])
+
+ def save(self, repo, msg=None):
+ if len(self.applied) == 0:
+ self.ui.warn(_("save: no patches applied, exiting\n"))
+ return 1
+ if self.issaveline(self.applied[-1]):
+ self.ui.warn(_("status is already saved\n"))
+ return 1
+
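+        # queue state is serialized into the save changeset description: an
+        # optional "Dirstate: <p1> <p2>" line, then "Patch Data:" followed by
+        # one "rev:name" line per applied patch and a ":name" line for every
+        # series entry; restore() above parses exactly this layout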
+ ar = [ ':' + x for x in self.full_series ]
+ if not msg:
+ msg = _("hg patches saved state")
+ else:
+ msg = "hg patches: " + msg.rstrip('\r\n')
+ r = self.qrepo()
+ if r:
+ pp = r.dirstate.parents()
+ msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
+ msg += "\n\nPatch Data:\n"
+ text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
+ "\n".join(ar) + '\n' or "")
+ n = repo.commit(text, force=True)
+ if not n:
+ self.ui.warn(_("repo commit failed\n"))
+ return 1
+ self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
+ self.applied_dirty = 1
+ self.removeundo(repo)
+
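+    # full_series_end() returns the position in full_series just past the
+    # last applied patch, i.e. where a newly created patch is inserted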
+ def full_series_end(self):
+ if len(self.applied) > 0:
+ p = self.applied[-1].name
+ end = self.find_series(p)
+ if end is None:
+ return len(self.full_series)
+ return end + 1
+ return 0
+
+ def series_end(self, all_patches=False):
+ """If all_patches is False, return the index of the next pushable patch
+ in the series, or the series length. If all_patches is True, return the
+ index of the first patch past the last applied one.
+ """
+ end = 0
+ def next(start):
+ if all_patches:
+ return start
+ i = start
+ while i < len(self.series):
+ p, reason = self.pushable(i)
+ if p:
+ break
+ self.explain_pushable(i)
+ i += 1
+ return i
+ if len(self.applied) > 0:
+ p = self.applied[-1].name
+ try:
+ end = self.series.index(p)
+ except ValueError:
+ return 0
+ return next(end + 1)
+ return next(end)
+
+ def appliedname(self, index):
+ pname = self.applied[index].name
+ if not self.ui.verbose:
+ p = pname
+ else:
+ p = str(self.series.index(pname)) + " " + pname
+ return p
+
+ def qimport(self, repo, files, patchname=None, rev=None, existing=None,
+ force=None, git=False):
+ def checkseries(patchname):
+ if patchname in self.series:
+ raise util.Abort(_('patch %s is already in the series file')
+ % patchname)
+ def checkfile(patchname):
+ if not force and os.path.exists(self.join(patchname)):
+ raise util.Abort(_('patch "%s" already exists')
+ % patchname)
+
+ if rev:
+ if files:
+ raise util.Abort(_('option "-r" not valid when importing '
+ 'files'))
+ rev = cmdutil.revrange(repo, rev)
+ rev.sort(reverse=True)
+ if (len(files) > 1 or len(rev) > 1) and patchname:
+ raise util.Abort(_('option "-n" not valid when importing multiple '
+ 'patches'))
+ i = 0
+ added = []
+ if rev:
+ # If mq patches are applied, we can only import revisions
+ # that form a linear path to qbase.
+ # Otherwise, they should form a linear path to a head.
+ heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
+ if len(heads) > 1:
+ raise util.Abort(_('revision %d is the root of more than one '
+ 'branch') % rev[-1])
+ if self.applied:
+ base = hex(repo.changelog.node(rev[0]))
+ if base in [n.rev for n in self.applied]:
+ raise util.Abort(_('revision %d is already managed')
+ % rev[0])
+ if heads != [bin(self.applied[-1].rev)]:
+ raise util.Abort(_('revision %d is not the parent of '
+ 'the queue') % rev[0])
+ base = repo.changelog.rev(bin(self.applied[0].rev))
+ lastparent = repo.changelog.parentrevs(base)[0]
+ else:
+ if heads != [repo.changelog.node(rev[0])]:
+ raise util.Abort(_('revision %d has unmanaged children')
+ % rev[0])
+ lastparent = None
+
+ if git:
+ self.diffopts().git = True
+
+ for r in rev:
+ p1, p2 = repo.changelog.parentrevs(r)
+ n = repo.changelog.node(r)
+ if p2 != nullrev:
+ raise util.Abort(_('cannot import merge revision %d') % r)
+ if lastparent and lastparent != r:
+ raise util.Abort(_('revision %d is not the parent of %d')
+ % (r, lastparent))
+ lastparent = p1
+
+ if not patchname:
+ patchname = normname('%d.diff' % r)
+ self.check_reserved_name(patchname)
+ checkseries(patchname)
+ checkfile(patchname)
+ self.full_series.insert(0, patchname)
+
+ patchf = self.opener(patchname, "w")
+ patch.export(repo, [n], fp=patchf, opts=self.diffopts())
+ patchf.close()
+
+ se = statusentry(hex(n), patchname)
+ self.applied.insert(0, se)
+
+ added.append(patchname)
+ patchname = None
+ self.parse_series()
+ self.applied_dirty = 1
+
+ for filename in files:
+ if existing:
+ if filename == '-':
+ raise util.Abort(_('-e is incompatible with import from -'))
+ if not patchname:
+ patchname = normname(filename)
+ self.check_reserved_name(patchname)
+ if not os.path.isfile(self.join(patchname)):
+ raise util.Abort(_("patch %s does not exist") % patchname)
+ else:
+ try:
+ if filename == '-':
+ if not patchname:
+ raise util.Abort(_('need --name to import a patch from -'))
+ text = sys.stdin.read()
+ else:
+ text = url.open(self.ui, filename).read()
+ except (OSError, IOError):
+ raise util.Abort(_("unable to read %s") % filename)
+ if not patchname:
+ patchname = normname(os.path.basename(filename))
+ self.check_reserved_name(patchname)
+ checkfile(patchname)
+ patchf = self.opener(patchname, "w")
+ patchf.write(text)
+ if not force:
+ checkseries(patchname)
+ if patchname not in self.series:
+ index = self.full_series_end() + i
+ self.full_series[index:index] = [patchname]
+ self.parse_series()
+ self.ui.warn(_("adding %s to series file\n") % patchname)
+ i += 1
+ added.append(patchname)
+ patchname = None
+ self.series_dirty = 1
+ qrepo = self.qrepo()
+ if qrepo:
+ qrepo.add(added)
+
+def delete(ui, repo, *patches, **opts):
+ """remove patches from queue
+
+ The patches must not be applied, and at least one patch is required. With
+ -k/--keep, the patch files are preserved in the patch directory.
+
+ To stop managing a patch and move it into permanent history,
+ use the qfinish command."""
+ q = repo.mq
+ q.delete(repo, patches, opts)
+ q.save_dirty()
+ return 0
+
+def applied(ui, repo, patch=None, **opts):
+ """print the patches already applied"""
+ q = repo.mq
+ if patch:
+ if patch not in q.series:
+ raise util.Abort(_("patch %s is not in series file") % patch)
+ end = q.series.index(patch) + 1
+ else:
+ end = q.series_end(True)
+ return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
+
+def unapplied(ui, repo, patch=None, **opts):
+ """print the patches not yet applied"""
+ q = repo.mq
+ if patch:
+ if patch not in q.series:
+ raise util.Abort(_("patch %s is not in series file") % patch)
+ start = q.series.index(patch) + 1
+ else:
+ start = q.series_end(True)
+ q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
+
+def qimport(ui, repo, *filename, **opts):
+ """import a patch
+
+ The patch is inserted into the series after the last applied
+ patch. If no patches have been applied, qimport prepends the patch
+ to the series.
+
+ The patch will have the same name as its source file unless you
+ give it a new one with -n/--name.
+
+ You can register an existing patch inside the patch directory with
+ the -e/--existing flag.
+
+ With -f/--force, an existing patch of the same name will be
+ overwritten.
+
+ An existing changeset may be placed under mq control with -r/--rev
+ (e.g. qimport --rev tip -n patch will place tip under mq control).
+ With -g/--git, patches imported with --rev will use the git diff
+ format. See the diffs help topic for information on why this is
+ important for preserving rename/copy information and permission
+ changes.
+
+ To import a patch from standard input, pass - as the patch file.
+ When importing from standard input, a patch name must be specified
+ using the --name flag.
+ """
+ q = repo.mq
+ q.qimport(repo, filename, patchname=opts['name'],
+ existing=opts['existing'], force=opts['force'], rev=opts['rev'],
+ git=opts['git'])
+ q.save_dirty()
+
+ if opts.get('push') and not opts.get('rev'):
+ return q.push(repo, None)
+ return 0
+
+def init(ui, repo, **opts):
+ """init a new queue repository
+
+ The queue repository is unversioned by default. If
+ -c/--create-repo is specified, qinit will create a separate nested
+ repository for patches (qinit -c may also be run later to convert
+ an unversioned patch repository into a versioned one). You can use
+ qcommit to commit changes to this queue repository."""
+ q = repo.mq
+ r = q.init(repo, create=opts['create_repo'])
+ q.save_dirty()
+ if r:
+ if not os.path.exists(r.wjoin('.hgignore')):
+ fp = r.wopener('.hgignore', 'w')
+ fp.write('^\\.hg\n')
+ fp.write('^\\.mq\n')
+ fp.write('syntax: glob\n')
+ fp.write('status\n')
+ fp.write('guards\n')
+ fp.close()
+ if not os.path.exists(r.wjoin('series')):
+ r.wopener('series', 'w').close()
+ r.add(['.hgignore', 'series'])
+ commands.add(ui, r)
+ return 0
+
+def clone(ui, source, dest=None, **opts):
+    '''clone main and patch repository at the same time
+
+    If the source is local, the destination will have no patches
+    applied. If the source is remote, this command cannot check
+    whether patches are applied in the source, so it cannot
+    guarantee that patches are not applied in the destination. If
+    you clone a remote repository, make sure beforehand that it
+    has no patches applied.
+
+    The source patch repository is looked for in <src>/.hg/patches
+    by default. Use -p <url> to change it.
+
+ The patch directory must be a nested Mercurial repository, as
+ would be created by qinit -c.
+ '''
+ def patchdir(repo):
+ url = repo.url()
+ if url.endswith('/'):
+ url = url[:-1]
+ return url + '/.hg/patches'
+ if dest is None:
+ dest = hg.defaultdest(source)
+ sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
+ if opts['patches']:
+ patchespath = ui.expandpath(opts['patches'])
+ else:
+ patchespath = patchdir(sr)
+ try:
+ hg.repository(ui, patchespath)
+ except error.RepoError:
+ raise util.Abort(_('versioned patch repository not found'
+ ' (see qinit -c)'))
+ qbase, destrev = None, None
+ if sr.local():
+ if sr.mq.applied:
+ qbase = bin(sr.mq.applied[0].rev)
+ if not hg.islocal(dest):
+ heads = set(sr.heads())
+ destrev = list(heads.difference(sr.heads(qbase)))
+ destrev.append(sr.changelog.parents(qbase)[0])
+ elif sr.capable('lookup'):
+ try:
+ qbase = sr.lookup('qbase')
+ except error.RepoError:
+ pass
+ ui.note(_('cloning main repository\n'))
+ sr, dr = hg.clone(ui, sr.url(), dest,
+ pull=opts['pull'],
+ rev=destrev,
+ update=False,
+ stream=opts['uncompressed'])
+ ui.note(_('cloning patch repository\n'))
+ hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
+ pull=opts['pull'], update=not opts['noupdate'],
+ stream=opts['uncompressed'])
+ if dr.local():
+ if qbase:
+ ui.note(_('stripping applied patches from destination '
+ 'repository\n'))
+ dr.mq.strip(dr, qbase, update=False, backup=None)
+ if not opts['noupdate']:
+ ui.note(_('updating destination repository\n'))
+ hg.update(dr, dr.changelog.tip())
+
+def commit(ui, repo, *pats, **opts):
+ """commit changes in the queue repository"""
+ q = repo.mq
+ r = q.qrepo()
+ if not r: raise util.Abort('no queue repository')
+ commands.commit(r.ui, r, *pats, **opts)
+
+def series(ui, repo, **opts):
+ """print the entire series file"""
+ repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
+ return 0
+
+def top(ui, repo, **opts):
+ """print the name of the current patch"""
+ q = repo.mq
+ t = q.applied and q.series_end(True) or 0
+ if t:
+ return q.qseries(repo, start=t-1, length=1, status='A',
+ summary=opts.get('summary'))
+ else:
+ ui.write(_("no patches applied\n"))
+ return 1
+
+def next(ui, repo, **opts):
+ """print the name of the next patch"""
+ q = repo.mq
+ end = q.series_end()
+ if end == len(q.series):
+ ui.write(_("all patches applied\n"))
+ return 1
+ return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
+
+def prev(ui, repo, **opts):
+ """print the name of the previous patch"""
+ q = repo.mq
+ l = len(q.applied)
+ if l == 1:
+ ui.write(_("only one patch applied\n"))
+ return 1
+ if not l:
+ ui.write(_("no patches applied\n"))
+ return 1
+ return q.qseries(repo, start=l-2, length=1, status='A',
+ summary=opts.get('summary'))
+
+def setupheaderopts(ui, opts):
+ def do(opt, val):
+ if not opts[opt] and opts['current' + opt]:
+ opts[opt] = val
+ do('user', ui.username())
+ do('date', "%d %d" % util.makedate())
+
+def new(ui, repo, patch, *args, **opts):
+ """create a new patch
+
+ qnew creates a new patch on top of the currently-applied patch (if
+ any). It will refuse to run if there are any outstanding changes
+ unless -f/--force is specified, in which case the patch will be
+ initialized with them. You may also use -I/--include,
+ -X/--exclude, and/or a list of files after the patch name to add
+ only changes to matching files to the new patch, leaving the rest
+ as uncommitted modifications.
+
+ -u/--user and -d/--date can be used to set the (given) user and
+ date, respectively. -U/--currentuser and -D/--currentdate set user
+ to current user and date to current date.
+
+ -e/--edit, -m/--message or -l/--logfile set the patch header as
+ well as the commit message. If none is specified, the header is
+ empty and the commit message is '[mq]: PATCH'.
+
+ Use the -g/--git option to keep the patch in the git extended diff
+ format. Read the diffs help topic for more information on why this
+ is important for preserving permission changes and copy/rename
+ information.
+ """
+ msg = cmdutil.logmessage(opts)
+    def getmsg():
+        return ui.edit(msg, ui.username())
+    q = repo.mq
+    if opts.get('edit'):
+        opts['msg'] = getmsg
+    else:
+        opts['msg'] = msg
+ setupheaderopts(ui, opts)
+ q.new(repo, patch, *args, **opts)
+ q.save_dirty()
+ return 0
+
+def refresh(ui, repo, *pats, **opts):
+ """update the current patch
+
+ If any file patterns are provided, the refreshed patch will
+ contain only the modifications that match those patterns; the
+ remaining modifications will remain in the working directory.
+
+ If -s/--short is specified, files currently included in the patch
+ will be refreshed just like matched files and remain in the patch.
+
+ hg add/remove/copy/rename work as usual, though you might want to
+ use git-style patches (-g/--git or [diff] git=1) to track copies
+ and renames. See the diffs help topic for more information on the
+ git diff format.
+ """
+ q = repo.mq
+ message = cmdutil.logmessage(opts)
+ if opts['edit']:
+ if not q.applied:
+ ui.write(_("no patches applied\n"))
+ return 1
+ if message:
+ raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
+ patch = q.applied[-1].name
+ ph = patchheader(q.join(patch))
+ message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
+ setupheaderopts(ui, opts)
+ ret = q.refresh(repo, pats, msg=message, **opts)
+ q.save_dirty()
+ return ret
+
+def diff(ui, repo, *pats, **opts):
+ """diff of the current patch and subsequent modifications
+
+ Shows a diff which includes the current patch as well as any
+ changes which have been made in the working directory since the
+ last refresh (thus showing what the current patch would become
+ after a qrefresh).
+
+ Use 'hg diff' if you only want to see the changes made since the
+ last qrefresh, or 'hg export qtip' if you want to see changes made
+ by the current patch without including changes made since the
+ qrefresh.
+ """
+ repo.mq.diff(repo, pats, opts)
+ return 0
+
+def fold(ui, repo, *files, **opts):
+ """fold the named patches into the current patch
+
+ Patches must not yet be applied. Each patch will be successively
+ applied to the current patch in the order given. If all the
+ patches apply successfully, the current patch will be refreshed
+ with the new cumulative patch, and the folded patches will be
+ deleted. With -k/--keep, the folded patch files will not be
+ removed afterwards.
+
+ The header for each folded patch will be concatenated with the
+ current patch header, separated by a line of '* * *'."""
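+    # A usage sketch (hypothetical patch names): fold two unapplied
+    # patches into the current top patch and edit the combined header:
+    #   hg qfold -e feature-a.patch feature-b.patch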
+
+ q = repo.mq
+
+ if not files:
+ raise util.Abort(_('qfold requires at least one patch name'))
+ if not q.check_toppatch(repo):
+ raise util.Abort(_('No patches applied'))
+ q.check_localchanges(repo)
+
+ message = cmdutil.logmessage(opts)
+ if opts['edit']:
+ if message:
+ raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
+
+ parent = q.lookup('qtip')
+ patches = []
+ messages = []
+ for f in files:
+ p = q.lookup(f)
+ if p in patches or p == parent:
+            ui.warn(_('Skipping already folded patch %s\n') % p)
+ if q.isapplied(p):
+ raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
+ patches.append(p)
+
+ for p in patches:
+ if not message:
+ ph = patchheader(q.join(p))
+ if ph.message:
+ messages.append(ph.message)
+ pf = q.join(p)
+ (patchsuccess, files, fuzz) = q.patch(repo, pf)
+ if not patchsuccess:
+ raise util.Abort(_('Error folding patch %s') % p)
+ patch.updatedir(ui, repo, files)
+
+ if not message:
+ ph = patchheader(q.join(parent))
+ message, user = ph.message, ph.user
+ for msg in messages:
+ message.append('* * *')
+ message.extend(msg)
+ message = '\n'.join(message)
+
+ if opts['edit']:
+ message = ui.edit(message, user or ui.username())
+
+ q.refresh(repo, msg=message)
+ q.delete(repo, patches, opts)
+ q.save_dirty()
+
+def goto(ui, repo, patch, **opts):
+ '''push or pop patches until named patch is at top of stack'''
+ q = repo.mq
+ patch = q.lookup(patch)
+ if q.isapplied(patch):
+ ret = q.pop(repo, patch, force=opts['force'])
+ else:
+ ret = q.push(repo, patch, force=opts['force'])
+ q.save_dirty()
+ return ret
+
+def guard(ui, repo, *args, **opts):
+ '''set or print guards for a patch
+
+ Guards control whether a patch can be pushed. A patch with no
+ guards is always pushed. A patch with a positive guard ("+foo") is
+ pushed only if the qselect command has activated it. A patch with
+ a negative guard ("-foo") is never pushed if the qselect command
+ has activated it.
+
+ With no arguments, print the currently active guards.
+ With arguments, set guards for the named patch.
+ NOTE: Specifying negative guards now requires '--'.
+
+ To set guards on another patch:
+ hg qguard -- other.patch +2.6.17 -stable
+ '''
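+    # A usage sketch (hypothetical guard names): guard the top patch,
+    # then list all patches with their guards:
+    #   hg qguard -- +experimental -stable
+    #   hg qguard -l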
+ def status(idx):
+ guards = q.series_guards[idx] or ['unguarded']
+ ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
+ q = repo.mq
+ patch = None
+ args = list(args)
+ if opts['list']:
+ if args or opts['none']:
+ raise util.Abort(_('cannot mix -l/--list with options or arguments'))
+ for i in xrange(len(q.series)):
+ status(i)
+ return
+ if not args or args[0][0:1] in '-+':
+ if not q.applied:
+ raise util.Abort(_('no patches applied'))
+ patch = q.applied[-1].name
+ if patch is None and args[0][0:1] not in '-+':
+ patch = args.pop(0)
+ if patch is None:
+ raise util.Abort(_('no patch to work with'))
+ if args or opts['none']:
+ idx = q.find_series(patch)
+ if idx is None:
+ raise util.Abort(_('no patch named %s') % patch)
+ q.set_guards(idx, args)
+ q.save_dirty()
+ else:
+ status(q.series.index(q.lookup(patch)))
+
+def header(ui, repo, patch=None):
+ """print the header of the topmost or specified patch"""
+ q = repo.mq
+
+ if patch:
+ patch = q.lookup(patch)
+ else:
+ if not q.applied:
+ ui.write('no patches applied\n')
+ return 1
+ patch = q.lookup('qtip')
+ ph = patchheader(repo.mq.join(patch))
+
+ ui.write('\n'.join(ph.message) + '\n')
+
+def lastsavename(path):
+ (directory, base) = os.path.split(path)
+ names = os.listdir(directory)
+ namere = re.compile("%s.([0-9]+)" % base)
+ maxindex = None
+ maxname = None
+ for f in names:
+ m = namere.match(f)
+ if m:
+ index = int(m.group(1))
+ if maxindex is None or index > maxindex:
+ maxindex = index
+ maxname = f
+ if maxname:
+ return (os.path.join(directory, maxname), maxindex)
+ return (None, None)
+
+def savename(path):
+ (last, index) = lastsavename(path)
+ if last is None:
+ index = 0
+ newpath = path + ".%d" % (index + 1)
+ return newpath
+
+def push(ui, repo, patch=None, **opts):
+ """push the next patch onto the stack
+
+ When -f/--force is applied, all local changes in patched files
+ will be lost.
+ """
+ q = repo.mq
+ mergeq = None
+
+ if opts['merge']:
+ if opts['name']:
+ newpath = repo.join(opts['name'])
+ else:
+ newpath, i = lastsavename(q.path)
+ if not newpath:
+ ui.warn(_("no saved queues found, please use -n\n"))
+ return 1
+ mergeq = queue(ui, repo.join(""), newpath)
+ ui.warn(_("merging with queue at: %s\n") % mergeq.path)
+ ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
+ mergeq=mergeq, all=opts.get('all'))
+ return ret
+
+def pop(ui, repo, patch=None, **opts):
+ """pop the current patch off the stack
+
+ By default, pops off the top of the patch stack. If given a patch
+ name, keeps popping off patches until the named patch is at the
+ top of the stack.
+ """
+ localupdate = True
+ if opts['name']:
+ q = queue(ui, repo.join(""), repo.join(opts['name']))
+ ui.warn(_('using patch queue: %s\n') % q.path)
+ localupdate = False
+ else:
+ q = repo.mq
+ ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
+ all=opts['all'])
+ q.save_dirty()
+ return ret
+
+def rename(ui, repo, patch, name=None, **opts):
+ """rename a patch
+
+ With one argument, renames the current patch to PATCH1.
+ With two arguments, renames PATCH1 to PATCH2."""
+
+ q = repo.mq
+
+ if not name:
+ name = patch
+ patch = None
+
+ if patch:
+ patch = q.lookup(patch)
+ else:
+ if not q.applied:
+ ui.write(_('no patches applied\n'))
+ return
+ patch = q.lookup('qtip')
+ absdest = q.join(name)
+ if os.path.isdir(absdest):
+ name = normname(os.path.join(name, os.path.basename(patch)))
+ absdest = q.join(name)
+ if os.path.exists(absdest):
+ raise util.Abort(_('%s already exists') % absdest)
+
+ if name in q.series:
+ raise util.Abort(_('A patch named %s already exists in the series file') % name)
+
+ if ui.verbose:
+ ui.write('renaming %s to %s\n' % (patch, name))
+ i = q.find_series(patch)
+ guards = q.guard_re.findall(q.full_series[i])
+ q.full_series[i] = name + ''.join([' #' + g for g in guards])
+ q.parse_series()
+ q.series_dirty = 1
+
+ info = q.isapplied(patch)
+ if info:
+ q.applied[info[0]] = statusentry(info[1], name)
+ q.applied_dirty = 1
+
+ util.rename(q.join(patch), absdest)
+ r = q.qrepo()
+ if r:
+ wlock = r.wlock()
+ try:
+ if r.dirstate[patch] == 'a':
+ r.dirstate.forget(patch)
+ r.dirstate.add(name)
+ else:
+ if r.dirstate[name] == 'r':
+ r.undelete([name])
+ r.copy(patch, name)
+ r.remove([patch], False)
+ finally:
+ wlock.release()
+
+ q.save_dirty()
+
+def restore(ui, repo, rev, **opts):
+ """restore the queue state saved by a revision"""
+ rev = repo.lookup(rev)
+ q = repo.mq
+ q.restore(repo, rev, delete=opts['delete'],
+ qupdate=opts['update'])
+ q.save_dirty()
+ return 0
+
+def save(ui, repo, **opts):
+ """save current queue state"""
+ q = repo.mq
+ message = cmdutil.logmessage(opts)
+ ret = q.save(repo, msg=message)
+ if ret:
+ return ret
+ q.save_dirty()
+ if opts['copy']:
+ path = q.path
+ if opts['name']:
+ newpath = os.path.join(q.basepath, opts['name'])
+ if os.path.exists(newpath):
+ if not os.path.isdir(newpath):
+ raise util.Abort(_('destination %s exists and is not '
+ 'a directory') % newpath)
+ if not opts['force']:
+ raise util.Abort(_('destination %s exists, '
+ 'use -f to force') % newpath)
+ else:
+ newpath = savename(path)
+ ui.warn(_("copy %s to %s\n") % (path, newpath))
+ util.copyfiles(path, newpath)
+ if opts['empty']:
+ try:
+ os.unlink(q.join(q.status_path))
+        except OSError:
+ pass
+ return 0
+
+def strip(ui, repo, rev, **opts):
+ """strip a revision and all its descendants from the repository
+
+ If one of the working directory's parent revisions is stripped, the
+ working directory will be updated to the parent of the stripped
+ revision.
+ """
+ backup = 'all'
+ if opts['backup']:
+ backup = 'strip'
+ elif opts['nobackup']:
+ backup = 'none'
+
+ rev = repo.lookup(rev)
+ p = repo.dirstate.parents()
+ cl = repo.changelog
+ update = True
+ if p[0] == nullid:
+ update = False
+ elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
+ update = False
+ elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
+ update = False
+
+ repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
+ return 0
+
+def select(ui, repo, *args, **opts):
+ '''set or print guarded patches to push
+
+    Use the qguard command to set or print guards on a patch, then use
+ qselect to tell mq which guards to use. A patch will be pushed if
+ it has no guards or any positive guards match the currently
+ selected guard, but will not be pushed if any negative guards
+ match the current guard. For example:
+
+ qguard foo.patch -stable (negative guard)
+ qguard bar.patch +stable (positive guard)
+ qselect stable
+
+ This activates the "stable" guard. mq will skip foo.patch (because
+ it has a negative match) but push bar.patch (because it has a
+ positive match).
+
+ With no arguments, prints the currently active guards.
+ With one argument, sets the active guard.
+
+ Use -n/--none to deactivate guards (no other arguments needed).
+ When no guards are active, patches with positive guards are
+ skipped and patches with negative guards are pushed.
+
+ qselect can change the guards on applied patches. It does not pop
+ guarded patches by default. Use --pop to pop back to the last
+ applied patch that is not guarded. Use --reapply (which implies
+ --pop) to push back to the current patch afterwards, but skip
+ guarded patches.
+
+ Use -s/--series to print a list of all guards in the series file
+ (no other arguments needed). Use -v for more information.'''
+
+ q = repo.mq
+ guards = q.active()
+ if args or opts['none']:
+ old_unapplied = q.unapplied(repo)
+ old_guarded = [i for i in xrange(len(q.applied)) if
+ not q.pushable(i)[0]]
+ q.set_active(args)
+ q.save_dirty()
+ if not args:
+ ui.status(_('guards deactivated\n'))
+ if not opts['pop'] and not opts['reapply']:
+ unapplied = q.unapplied(repo)
+ guarded = [i for i in xrange(len(q.applied))
+ if not q.pushable(i)[0]]
+ if len(unapplied) != len(old_unapplied):
+ ui.status(_('number of unguarded, unapplied patches has '
+ 'changed from %d to %d\n') %
+ (len(old_unapplied), len(unapplied)))
+ if len(guarded) != len(old_guarded):
+ ui.status(_('number of guarded, applied patches has changed '
+ 'from %d to %d\n') %
+ (len(old_guarded), len(guarded)))
+ elif opts['series']:
+ guards = {}
+ noguards = 0
+ for gs in q.series_guards:
+ if not gs:
+ noguards += 1
+ for g in gs:
+ guards.setdefault(g, 0)
+ guards[g] += 1
+ if ui.verbose:
+ guards['NONE'] = noguards
+ guards = guards.items()
+ guards.sort(key=lambda x: x[0][1:])
+ if guards:
+ ui.note(_('guards in series file:\n'))
+ for guard, count in guards:
+ ui.note('%2d ' % count)
+ ui.write(guard, '\n')
+ else:
+ ui.note(_('no guards in series file\n'))
+ else:
+ if guards:
+ ui.note(_('active guards:\n'))
+ for g in guards:
+ ui.write(g, '\n')
+ else:
+ ui.write(_('no active guards\n'))
+ reapply = opts['reapply'] and q.applied and q.appliedname(-1)
+ popped = False
+ if opts['pop'] or opts['reapply']:
+ for i in xrange(len(q.applied)):
+ pushable, reason = q.pushable(i)
+ if not pushable:
+ ui.status(_('popping guarded patches\n'))
+ popped = True
+ if i == 0:
+ q.pop(repo, all=True)
+ else:
+ q.pop(repo, i-1)
+ break
+ if popped:
+ try:
+ if reapply:
+ ui.status(_('reapplying unguarded patches\n'))
+ q.push(repo, reapply)
+ finally:
+ q.save_dirty()
+
+def finish(ui, repo, *revrange, **opts):
+ """move applied patches into repository history
+
+ Finishes the specified revisions (corresponding to applied
+ patches) by moving them out of mq control into regular repository
+ history.
+
+ Accepts a revision range or the -a/--applied option. If --applied
+ is specified, all applied mq revisions are removed from mq
+ control. Otherwise, the given revisions must be at the base of the
+ stack of applied patches.
+
+ This can be especially useful if your changes have been applied to
+ an upstream repository, or if you are about to push your changes
+ to upstream.
+ """
+ if not opts['applied'] and not revrange:
+ raise util.Abort(_('no revisions specified'))
+ elif opts['applied']:
+ revrange = ('qbase:qtip',) + revrange
+
+ q = repo.mq
+ if not q.applied:
+ ui.status(_('no patches applied\n'))
+ return 0
+
+ revs = cmdutil.revrange(repo, revrange)
+ q.finish(repo, revs)
+ q.save_dirty()
+ return 0
+
+def reposetup(ui, repo):
+ class mqrepo(repo.__class__):
+ @util.propertycache
+ def mq(self):
+ return queue(self.ui, self.join(""))
+
+ def abort_if_wdir_patched(self, errmsg, force=False):
+ if self.mq.applied and not force:
+ parent = hex(self.dirstate.parents()[0])
+ if parent in [s.rev for s in self.mq.applied]:
+ raise util.Abort(errmsg)
+
+ def commit(self, text="", user=None, date=None, match=None,
+ force=False, editor=False, extra={}):
+ self.abort_if_wdir_patched(
+ _('cannot commit over an applied mq patch'),
+ force)
+
+ return super(mqrepo, self).commit(text, user, date, match, force,
+ editor, extra)
+
+ def push(self, remote, force=False, revs=None):
+ if self.mq.applied and not force and not revs:
+ raise util.Abort(_('source has mq patches applied'))
+ return super(mqrepo, self).push(remote, force, revs)
+
+ def _findtags(self):
+ '''augment tags from base class with patch tags'''
+ result = super(mqrepo, self)._findtags()
+
+ q = self.mq
+ if not q.applied:
+ return result
+
+ mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
+
+ if mqtags[-1][0] not in self.changelog.nodemap:
+ self.ui.warn(_('mq status file refers to unknown node %s\n')
+ % short(mqtags[-1][0]))
+ return result
+
+ mqtags.append((mqtags[-1][0], 'qtip'))
+ mqtags.append((mqtags[0][0], 'qbase'))
+ mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
+ tags = result[0]
+ for patch in mqtags:
+ if patch[1] in tags:
+ self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
+ % patch[1])
+ else:
+ tags[patch[1]] = patch[0]
+
+ return result
+
+ def _branchtags(self, partial, lrev):
+ q = self.mq
+ if not q.applied:
+ return super(mqrepo, self)._branchtags(partial, lrev)
+
+ cl = self.changelog
+ qbasenode = bin(q.applied[0].rev)
+ if qbasenode not in cl.nodemap:
+ self.ui.warn(_('mq status file refers to unknown node %s\n')
+ % short(qbasenode))
+ return super(mqrepo, self)._branchtags(partial, lrev)
+
+ qbase = cl.rev(qbasenode)
+ start = lrev + 1
+ if start < qbase:
+ # update the cache (excluding the patches) and save it
+ self._updatebranchcache(partial, lrev+1, qbase)
+ self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
+ start = qbase
+ # if start = qbase, the cache is as updated as it should be.
+ # if start > qbase, the cache includes (part of) the patches.
+ # we might as well use it, but we won't save it.
+
+ # update the cache up to the tip
+ self._updatebranchcache(partial, start, len(cl))
+
+ return partial
+
+ if repo.local():
+ repo.__class__ = mqrepo
+
+def mqimport(orig, ui, repo, *args, **kwargs):
+ if hasattr(repo, 'abort_if_wdir_patched'):
+ repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
+ kwargs.get('force'))
+ return orig(ui, repo, *args, **kwargs)
+
+def uisetup(ui):
+ extensions.wrapcommand(commands.table, 'import', mqimport)
+
+seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
+
+cmdtable = {
+ "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
+ "qclone":
+ (clone,
+ [('', 'pull', None, _('use pull protocol to copy metadata')),
+ ('U', 'noupdate', None, _('do not update the new working directories')),
+ ('', 'uncompressed', None,
+ _('use uncompressed transfer (fast over LAN)')),
+ ('p', 'patches', '', _('location of source patch repository')),
+ ] + commands.remoteopts,
+ _('hg qclone [OPTION]... SOURCE [DEST]')),
+ "qcommit|qci":
+ (commit,
+ commands.table["^commit|ci"][1],
+ _('hg qcommit [OPTION]... [FILE]...')),
+ "^qdiff":
+ (diff,
+ commands.diffopts + commands.diffopts2 + commands.walkopts,
+ _('hg qdiff [OPTION]... [FILE]...')),
+ "qdelete|qremove|qrm":
+ (delete,
+ [('k', 'keep', None, _('keep patch file')),
+ ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
+ _('hg qdelete [-k] [-r REV]... [PATCH]...')),
+ 'qfold':
+ (fold,
+ [('e', 'edit', None, _('edit patch header')),
+ ('k', 'keep', None, _('keep folded patch files')),
+ ] + commands.commitopts,
+ _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
+ 'qgoto':
+ (goto,
+ [('f', 'force', None, _('overwrite any local changes'))],
+ _('hg qgoto [OPTION]... PATCH')),
+ 'qguard':
+ (guard,
+ [('l', 'list', None, _('list all patches and guards')),
+ ('n', 'none', None, _('drop all guards'))],
+ _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
+ 'qheader': (header, [], _('hg qheader [PATCH]')),
+ "^qimport":
+ (qimport,
+ [('e', 'existing', None, _('import file in patch directory')),
+ ('n', 'name', '', _('name of patch file')),
+ ('f', 'force', None, _('overwrite existing files')),
+ ('r', 'rev', [], _('place existing revisions under mq control')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('P', 'push', None, _('qpush after importing'))],
+ _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
+ "^qinit":
+ (init,
+ [('c', 'create-repo', None, _('create queue repository'))],
+ _('hg qinit [-c]')),
+ "qnew":
+ (new,
+ [('e', 'edit', None, _('edit commit message')),
+ ('f', 'force', None, _('import uncommitted changes into patch')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
+ ('u', 'user', '', _('add "From: <given user>" to patch')),
+ ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
+ ('d', 'date', '', _('add "Date: <given date>" to patch'))
+ ] + commands.walkopts + commands.commitopts,
+ _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
+ "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
+ "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
+ "^qpop":
+ (pop,
+ [('a', 'all', None, _('pop all patches')),
+ ('n', 'name', '', _('queue name to pop')),
+ ('f', 'force', None, _('forget any local changes'))],
+ _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
+ "^qpush":
+ (push,
+ [('f', 'force', None, _('apply if the patch has rejects')),
+ ('l', 'list', None, _('list patch name in commit text')),
+ ('a', 'all', None, _('apply all patches')),
+ ('m', 'merge', None, _('merge from another queue')),
+ ('n', 'name', '', _('merge queue name'))],
+ _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
+ "^qrefresh":
+ (refresh,
+ [('e', 'edit', None, _('edit commit message')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('s', 'short', None, _('refresh only files already in the patch and specified files')),
+ ('U', 'currentuser', None, _('add/update author field in patch with current user')),
+ ('u', 'user', '', _('add/update author field in patch with given user')),
+ ('D', 'currentdate', None, _('add/update date field in patch with current date')),
+ ('d', 'date', '', _('add/update date field in patch with given date'))
+ ] + commands.walkopts + commands.commitopts,
+ _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
+ 'qrename|qmv':
+ (rename, [], _('hg qrename PATCH1 [PATCH2]')),
+ "qrestore":
+ (restore,
+ [('d', 'delete', None, _('delete save entry')),
+ ('u', 'update', None, _('update queue working directory'))],
+ _('hg qrestore [-d] [-u] REV')),
+ "qsave":
+ (save,
+ [('c', 'copy', None, _('copy patch directory')),
+ ('n', 'name', '', _('copy directory name')),
+ ('e', 'empty', None, _('clear queue status file')),
+ ('f', 'force', None, _('force copy'))] + commands.commitopts,
+ _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
+ "qselect":
+ (select,
+ [('n', 'none', None, _('disable all guards')),
+ ('s', 'series', None, _('list all guards in series file')),
+ ('', 'pop', None, _('pop to before first guarded applied patch')),
+ ('', 'reapply', None, _('pop, then reapply patches'))],
+ _('hg qselect [OPTION]... [GUARD]...')),
+ "qseries":
+ (series,
+ [('m', 'missing', None, _('print patches not in series')),
+ ] + seriesopts,
+ _('hg qseries [-ms]')),
+ "^strip":
+ (strip,
+ [('f', 'force', None, _('force removal with local changes')),
+ ('b', 'backup', None, _('bundle unrelated changesets')),
+ ('n', 'nobackup', None, _('no backups'))],
+ _('hg strip [-f] [-b] [-n] REV')),
+ "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
+ "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
+ "qfinish":
+ (finish,
+ [('a', 'applied', None, _('finish all applied changesets'))],
+ _('hg qfinish [-a] [REV]...')),
+}
diff --git a/sys/src/cmd/hg/hgext/notify.py b/sys/src/cmd/hg/hgext/notify.py
new file mode 100644
index 000000000..4cd27dc05
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/notify.py
@@ -0,0 +1,298 @@
+# notify.py - email notifications for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''hooks for sending email notifications at commit/push time
+
+Subscriptions can be managed through an hgrc file. The default mode is
+to print messages to stdout, for testing and configuring.
+
+To use, configure the notify extension and enable it in hgrc like
+this::
+
+ [extensions]
+ hgext.notify =
+
+ [hooks]
+ # one email for each incoming changeset
+ incoming.notify = python:hgext.notify.hook
+ # batch emails when many changesets incoming at one time
+ changegroup.notify = python:hgext.notify.hook
+
+ [notify]
+ # config items go here
+
+Required configuration items::
+
+ config = /path/to/file # file containing subscriptions
+
+Optional configuration items::
+
+ test = True # print messages to stdout for testing
+ strip = 3 # number of slashes to strip for url paths
+ domain = example.com # domain to use if committer missing domain
+ style = ... # style file to use when formatting email
+ template = ... # template to use when formatting email
+ incoming = ... # template to use when run as incoming hook
+ changegroup = ... # template when run as changegroup hook
+ maxdiff = 300 # max lines of diffs to include (0=none, -1=all)
+ maxsubject = 67 # truncate subject line longer than this
+ diffstat = True # add a diffstat before the diff content
+ sources = serve # notify if source of incoming changes in this list
+ # (serve == ssh or http, push, pull, bundle)
+ [email]
+ from = user@host.com # email address to send as if none given
+ [web]
+ baseurl = http://hgserver/... # root of hg web site for browsing commits
+
+The notify config file has the same format as a regular hgrc file. It
+has two sections so you can express subscriptions in whichever way is
+handier for you.
+
+::
+
+ [usersubs]
+ # key is subscriber email, value is ","-separated list of glob patterns
+ user@host = pattern
+
+ [reposubs]
+ # key is glob pattern, value is ","-separated list of subscriber emails
+ pattern = user@host
+
+Glob patterns are matched against the path to the repository root.
+
+If you like, you can put the notify config file in a repository that
+users can push changes to; that way they can manage their own
+subscriptions.
+'''
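+# A hypothetical subscriptions file tying the two sections together
+# (paths and addresses are examples only):
+#
+#   [usersubs]
+#   alice@example.com = /repos/widget/*
+#
+#   [reposubs]
+#   /repos/widget/* = bob@example.com, carol@example.com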
+
+from mercurial.i18n import _
+from mercurial import patch, cmdutil, templater, util, mail
+import email.Parser, email.Errors, fnmatch, socket, time
+
+# template for single changeset can include email headers.
+single_template = '''
+Subject: changeset in {webroot}: {desc|firstline|strip}
+From: {author}
+
+changeset {node|short} in {root}
+details: {baseurl}{webroot}?cmd=changeset;node={node|short}
+description:
+\t{desc|tabindent|strip}
+'''.lstrip()
+
+# template for multiple changesets should not contain email headers,
+# because only first set of headers will be used and result will look
+# strange.
+multiple_template = '''
+changeset {node|short} in {root}
+details: {baseurl}{webroot}?cmd=changeset;node={node|short}
+summary: {desc|firstline}
+'''
+
+deftemplates = {
+ 'changegroup': multiple_template,
+}
+
+class notifier(object):
+ '''email notification class.'''
+
+ def __init__(self, ui, repo, hooktype):
+ self.ui = ui
+ cfg = self.ui.config('notify', 'config')
+ if cfg:
+ self.ui.readconfig(cfg, sections=['usersubs', 'reposubs'])
+ self.repo = repo
+ self.stripcount = int(self.ui.config('notify', 'strip', 0))
+ self.root = self.strip(self.repo.root)
+ self.domain = self.ui.config('notify', 'domain')
+ self.test = self.ui.configbool('notify', 'test', True)
+ self.charsets = mail._charsets(self.ui)
+ self.subs = self.subscribers()
+
+ mapfile = self.ui.config('notify', 'style')
+ template = (self.ui.config('notify', hooktype) or
+ self.ui.config('notify', 'template'))
+ self.t = cmdutil.changeset_templater(self.ui, self.repo,
+ False, None, mapfile, False)
+ if not mapfile and not template:
+ template = deftemplates.get(hooktype) or single_template
+ if template:
+ template = templater.parsestring(template, quoted=False)
+ self.t.use_template(template)
+
+ def strip(self, path):
+ '''strip leading slashes from local path, turn into web-safe path.'''
+
+ path = util.pconvert(path)
+ count = self.stripcount
+ while count > 0:
+ c = path.find('/')
+ if c == -1:
+ break
+ path = path[c+1:]
+ count -= 1
+ return path
+
+ def fixmail(self, addr):
+ '''try to clean up email addresses.'''
+
+ addr = util.email(addr.strip())
+ if self.domain:
+ a = addr.find('@localhost')
+ if a != -1:
+ addr = addr[:a]
+ if '@' not in addr:
+ return addr + '@' + self.domain
+ return addr
+
+ def subscribers(self):
+ '''return list of email addresses of subscribers to this repo.'''
+ subs = set()
+ for user, pats in self.ui.configitems('usersubs'):
+ for pat in pats.split(','):
+ if fnmatch.fnmatch(self.repo.root, pat.strip()):
+ subs.add(self.fixmail(user))
+ for pat, users in self.ui.configitems('reposubs'):
+ if fnmatch.fnmatch(self.repo.root, pat):
+ for user in users.split(','):
+ subs.add(self.fixmail(user))
+ return [mail.addressencode(self.ui, s, self.charsets, self.test)
+ for s in sorted(subs)]
+
+ def url(self, path=None):
+ return self.ui.config('web', 'baseurl') + (path or self.root)
+
+ def node(self, ctx):
+ '''format one changeset.'''
+ self.t.show(ctx, changes=ctx.changeset(),
+ baseurl=self.ui.config('web', 'baseurl'),
+ root=self.repo.root, webroot=self.root)
+
+ def skipsource(self, source):
+ '''true if incoming changes from this source should be skipped.'''
+ ok_sources = self.ui.config('notify', 'sources', 'serve').split()
+ return source not in ok_sources
+
+ def send(self, ctx, count, data):
+ '''send message.'''
+
+ p = email.Parser.Parser()
+ try:
+ msg = p.parsestr(data)
+ except email.Errors.MessageParseError, inst:
+ raise util.Abort(inst)
+
+ # store sender and subject
+ sender, subject = msg['From'], msg['Subject']
+ del msg['From'], msg['Subject']
+
+ if not msg.is_multipart():
+ # create fresh mime message from scratch
+ # (multipart templates must take care of this themselves)
+ headers = msg.items()
+ payload = msg.get_payload()
+ # for notification prefer readability over data precision
+ msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
+ # reinstate custom headers
+ for k, v in headers:
+ msg[k] = v
+
+ msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
+
+ # try to make subject line exist and be useful
+ if not subject:
+ if count > 1:
+ subject = _('%s: %d new changesets') % (self.root, count)
+ else:
+ s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
+ subject = '%s: %s' % (self.root, s)
+ maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
+ if maxsubject and len(subject) > maxsubject:
+ subject = subject[:maxsubject-3] + '...'
+ msg['Subject'] = mail.headencode(self.ui, subject,
+ self.charsets, self.test)
+
+ # try to make message have proper sender
+ if not sender:
+ sender = self.ui.config('email', 'from') or self.ui.username()
+ if '@' not in sender or '@localhost' in sender:
+ sender = self.fixmail(sender)
+ msg['From'] = mail.addressencode(self.ui, sender,
+ self.charsets, self.test)
+
+ msg['X-Hg-Notification'] = 'changeset %s' % ctx
+ if not msg['Message-Id']:
+ msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
+ (ctx, int(time.time()),
+ hash(self.repo.root), socket.getfqdn()))
+ msg['To'] = ', '.join(self.subs)
+
+ msgtext = msg.as_string()
+ if self.test:
+ self.ui.write(msgtext)
+ if not msgtext.endswith('\n'):
+ self.ui.write('\n')
+ else:
+ self.ui.status(_('notify: sending %d subscribers %d changes\n') %
+ (len(self.subs), count))
+ mail.sendmail(self.ui, util.email(msg['From']),
+ self.subs, msgtext)
+
+ def diff(self, ctx, ref=None):
+
+ maxdiff = int(self.ui.config('notify', 'maxdiff', 300))
+ prev = ctx.parents()[0].node()
+ ref = ref and ref.node() or ctx.node()
+ chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui))
+ difflines = ''.join(chunks).splitlines()
+
+ if self.ui.configbool('notify', 'diffstat', True):
+ s = patch.diffstat(difflines)
+ # s may be nil, don't include the header if it is
+ if s:
+ self.ui.write('\ndiffstat:\n\n%s' % s)
+
+ if maxdiff == 0:
+ return
+ elif maxdiff > 0 and len(difflines) > maxdiff:
+ msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
+ self.ui.write(msg % (len(difflines), maxdiff))
+ difflines = difflines[:maxdiff]
+ elif difflines:
+ self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
+
+ self.ui.write("\n".join(difflines))
+
+def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
+ '''send email notifications to interested subscribers.
+
+    If used as a changegroup hook, send one email for all changesets
+    in the changegroup; otherwise send one email per changeset.'''
+
+ n = notifier(ui, repo, hooktype)
+ ctx = repo[node]
+
+ if not n.subs:
+ ui.debug(_('notify: no subscribers to repository %s\n') % n.root)
+ return
+ if n.skipsource(source):
+ ui.debug(_('notify: changes have source "%s" - skipping\n') % source)
+ return
+
+ ui.pushbuffer()
+ if hooktype == 'changegroup':
+ start, end = ctx.rev(), len(repo)
+ count = end - start
+ for rev in xrange(start, end):
+ n.node(repo[rev])
+ n.diff(ctx, repo['tip'])
+ else:
+ count = 1
+ n.node(ctx)
+ n.diff(ctx)
+
+ data = ui.popbuffer()
+ n.send(ctx, count, data)
diff --git a/sys/src/cmd/hg/hgext/pager.py b/sys/src/cmd/hg/hgext/pager.py
new file mode 100644
index 000000000..1d973c485
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/pager.py
@@ -0,0 +1,64 @@
+# pager.py - display output using a pager
+#
+# Copyright 2008 David Soria Parra <dsp@php.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+# To load the extension, add it to your .hgrc file:
+#
+# [extensions]
+# hgext.pager =
+#
+# Run "hg help pager" to get info on configuration.
+
+'''browse command output with an external pager
+
+To set the pager that should be used, set the pager configuration
+variable::
+
+ [pager]
+ pager = LESS='FSRX' less
+
+If no pager is set, the pager extension uses the environment variable
+$PAGER. If neither pager.pager nor $PAGER is set, no pager is used.
+
+If you notice "BROKEN PIPE" error messages, you can disable them by
+setting::
+
+ [pager]
+ quiet = True
+
+You can disable the pager for certain commands by adding them to the
+pager.ignore list::
+
+ [pager]
+ ignore = version, help, update
+
+You can also enable the pager only for certain commands using
+pager.attend::
+
+ [pager]
+ attend = log
+
+If pager.attend is present, pager.ignore will be ignored.
+
+To ignore global commands like "hg version" or "hg help", you have to
+specify them in your global .hgrc.
+'''
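+# A minimal configuration sketch combining the options above (values
+# are examples only):
+#
+#   [pager]
+#   pager = LESS='FSRX' less
+#   quiet = True
+#   attend = log, diff, annotate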
+
+import sys, os, signal
+from mercurial import dispatch, util, extensions
+
+def uisetup(ui):
+ def pagecmd(orig, ui, options, cmd, cmdfunc):
+ p = ui.config("pager", "pager", os.environ.get("PAGER"))
+ if p and sys.stdout.isatty() and '--debugger' not in sys.argv:
+ attend = ui.configlist('pager', 'attend')
+ if (cmd in attend or
+ (cmd not in ui.configlist('pager', 'ignore') and not attend)):
+ sys.stderr = sys.stdout = util.popen(p, "wb")
+ if ui.configbool('pager', 'quiet'):
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ return orig(ui, options, cmd, cmdfunc)
+
+ extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
diff --git a/sys/src/cmd/hg/hgext/parentrevspec.py b/sys/src/cmd/hg/hgext/parentrevspec.py
new file mode 100644
index 000000000..6d6b2eb6c
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/parentrevspec.py
@@ -0,0 +1,96 @@
+# Mercurial extension to make it easy to refer to the parent of a revision
+#
+# Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''interpret suffixes to refer to ancestor revisions
+
+This extension allows you to use git-style suffixes to refer to the
+ancestors of a specific revision.
+
+For example, if you can refer to a revision as "foo", then::
+
+ foo^N = Nth parent of foo
+ foo^0 = foo
+ foo^1 = first parent of foo
+ foo^2 = second parent of foo
+ foo^ = foo^1
+
+ foo~N = Nth first grandparent of foo
+ foo~0 = foo
+ foo~1 = foo^1 = foo^ = first parent of foo
+ foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo
+'''
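+# A usage sketch (hypothetical revision names), once enabled:
+#   hg log -r tip~3      # first-parent great-grandparent of tip
+#   hg diff -r foo^2     # diff working dir against foo's second parent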
+from mercurial import error
+
+def reposetup(ui, repo):
+ if not repo.local():
+ return
+
+ class parentrevspecrepo(repo.__class__):
+ def lookup(self, key):
+ try:
+ _super = super(parentrevspecrepo, self)
+ return _super.lookup(key)
+ except error.RepoError:
+ pass
+
+ circ = key.find('^')
+ tilde = key.find('~')
+ if circ < 0 and tilde < 0:
+ raise
+ elif circ >= 0 and tilde >= 0:
+ end = min(circ, tilde)
+ else:
+ end = max(circ, tilde)
+
+ cl = self.changelog
+ base = key[:end]
+ try:
+ node = _super.lookup(base)
+ except error.RepoError:
+ # eek - reraise the first error
+ return _super.lookup(key)
+
+ rev = cl.rev(node)
+ suffix = key[end:]
+ i = 0
+ while i < len(suffix):
+ # foo^N => Nth parent of foo
+ # foo^0 == foo
+ # foo^1 == foo^ == 1st parent of foo
+ # foo^2 == 2nd parent of foo
+ if suffix[i] == '^':
+ j = i + 1
+ p = cl.parentrevs(rev)
+ if j < len(suffix) and suffix[j].isdigit():
+ j += 1
+ n = int(suffix[i+1:j])
+ if n > 2 or n == 2 and p[1] == -1:
+ raise
+ else:
+ n = 1
+ if n:
+ rev = p[n - 1]
+ i = j
+ # foo~N => Nth first grandparent of foo
+ # foo~0 = foo
+ # foo~1 = foo^1 == foo^ == 1st parent of foo
+ # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo
+ elif suffix[i] == '~':
+ j = i + 1
+ while j < len(suffix) and suffix[j].isdigit():
+ j += 1
+ if j == i + 1:
+ raise
+ n = int(suffix[i+1:j])
+ for k in xrange(n):
+ rev = cl.parentrevs(rev)[0]
+ i = j
+ else:
+ raise
+ return cl.node(rev)
+
+ repo.__class__ = parentrevspecrepo
diff --git a/sys/src/cmd/hg/hgext/patchbomb.py b/sys/src/cmd/hg/hgext/patchbomb.py
new file mode 100644
index 000000000..8ad33384b
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/patchbomb.py
@@ -0,0 +1,513 @@
+# patchbomb.py - sending Mercurial changesets as patch emails
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''command to send changesets as (a series of) patch emails
+
+The series is started off with a "[PATCH 0 of N]" introduction, which
+describes the series as a whole.
+
+Each patch email has a Subject line of "[PATCH M of N] ...", using the
+first line of the changeset description as the subject text. The
+message contains two or three body parts:
+
+- The changeset description.
+- [Optional] The result of running diffstat on the patch.
+- The patch itself, as generated by "hg export".
+
+Each message refers to the first in the series using the In-Reply-To
+and References headers, so they will show up as a sequence in threaded
+mail and news readers, and in mail archives.
+
+With the -d/--diffstat option, you will be prompted for each changeset
+with a diffstat summary and the changeset summary, so you can be sure
+you are sending the right changes.
+
+To configure other defaults, add a section like this to your hgrc
+file::
+
+ [email]
+ from = My Name <my@email>
+ to = recipient1, recipient2, ...
+ cc = cc1, cc2, ...
+ bcc = bcc1, bcc2, ...
+
+Then you can use the "hg email" command to mail a series of changesets
+as a patchbomb.
+
+To avoid sending patches prematurely, it is a good idea to first run
+the "email" command with the "-n" option (test only). You will be
+prompted for an email recipient address, a subject and an introductory
+message describing the patches of your patchbomb. Then when all is
+done, patchbomb messages are displayed. If the PAGER environment
+variable is set, your pager will be fired up once for each patchbomb
+message, so you can verify everything is all right.
+
+The -m/--mbox option is also very useful. Instead of previewing each
+patchbomb message in a pager or sending the messages directly, it will
+create a UNIX mailbox file with the patch emails. This mailbox file
+can be previewed with any mail user agent which supports UNIX mbox
+files, e.g. with mutt::
+
+ % mutt -R -f mbox
+
+When you are previewing the patchbomb messages, you can use ``formail``
+(a utility that is commonly installed as part of the procmail
+package) to send each message out::
+
+ % formail -s sendmail -bm -t < mbox
+
+That should be all. Now your patchbomb is on its way out.
+
+You can also either configure the method option in the email section
+to be a sendmail compatible mailer or fill out the [smtp] section so
+that the patchbomb extension can automatically send patchbombs
+directly from the commandline. See the [email] and [smtp] sections in
+hgrc(5) for details.
+'''
+
+import os, errno, socket, tempfile, cStringIO, time
+import email.MIMEMultipart, email.MIMEBase
+import email.Utils, email.Encoders, email.Generator
+from mercurial import cmdutil, commands, hg, mail, patch, util
+from mercurial.i18n import _
+from mercurial.node import bin
+
+def prompt(ui, prompt, default=None, rest=': ', empty_ok=False):
+ if not ui.interactive():
+ return default
+ if default:
+ prompt += ' [%s]' % default
+ prompt += rest
+ while True:
+ r = ui.prompt(prompt, default=default)
+ if r:
+ return r
+ if default is not None:
+ return default
+ if empty_ok:
+ return r
+ ui.warn(_('Please enter a valid value.\n'))
+
+def cdiffstat(ui, summary, patchlines):
+ s = patch.diffstat(patchlines)
+ if summary:
+ ui.write(summary, '\n')
+ ui.write(s, '\n')
+ ans = prompt(ui, _('does the diffstat above look okay? '), 'y')
+ if not ans.lower().startswith('y'):
+ raise util.Abort(_('diffstat rejected'))
+ return s
+
+def makepatch(ui, repo, patch, opts, _charsets, idx, total, patchname=None):
+
+ desc = []
+ node = None
+ body = ''
+
+ for line in patch:
+ if line.startswith('#'):
+ if line.startswith('# Node ID'):
+ node = line.split()[-1]
+ continue
+ if line.startswith('diff -r') or line.startswith('diff --git'):
+ break
+ desc.append(line)
+
+ if not patchname and not node:
+ raise ValueError
+
+ if opts.get('attach'):
+ body = ('\n'.join(desc[1:]).strip() or
+ 'Patch subject is complete summary.')
+ body += '\n\n\n'
+
+ if opts.get('plain'):
+ while patch and patch[0].startswith('# '):
+ patch.pop(0)
+ if patch:
+ patch.pop(0)
+ while patch and not patch[0].strip():
+ patch.pop(0)
+
+ if opts.get('diffstat'):
+ body += cdiffstat(ui, '\n'.join(desc), patch) + '\n\n'
+
+ if opts.get('attach') or opts.get('inline'):
+ msg = email.MIMEMultipart.MIMEMultipart()
+ if body:
+ msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
+ p = mail.mimetextpatch('\n'.join(patch), 'x-patch', opts.get('test'))
+ binnode = bin(node)
+ # if node is mq patch, it will have the patch file's name as a tag
+ if not patchname:
+ patchtags = [t for t in repo.nodetags(binnode)
+ if t.endswith('.patch') or t.endswith('.diff')]
+ if patchtags:
+ patchname = patchtags[0]
+ elif total > 1:
+ patchname = cmdutil.make_filename(repo, '%b-%n.patch',
+ binnode, seqno=idx, total=total)
+ else:
+ patchname = cmdutil.make_filename(repo, '%b.patch', binnode)
+ disposition = 'inline'
+ if opts.get('attach'):
+ disposition = 'attachment'
+ p['Content-Disposition'] = disposition + '; filename=' + patchname
+ msg.attach(p)
+ else:
+ body += '\n'.join(patch)
+ msg = mail.mimetextpatch(body, display=opts.get('test'))
+
+ flag = ' '.join(opts.get('flag'))
+ if flag:
+ flag = ' ' + flag
+
+ subj = desc[0].strip().rstrip('. ')
+ if total == 1 and not opts.get('intro'):
+ subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj)
+ else:
+ tlen = len(str(total))
+ subj = '[PATCH %0*d of %d%s] %s' % (tlen, idx, total, flag, subj)
+ msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
+ msg['X-Mercurial-Node'] = node
+ return msg, subj
+
+def patchbomb(ui, repo, *revs, **opts):
+ '''send changesets by email
+
+ By default, diffs are sent in the format generated by hg export,
+ one per message. The series starts with a "[PATCH 0 of N]"
+ introduction, which describes the series as a whole.
+
+ Each patch email has a Subject line of "[PATCH M of N] ...", using
+ the first line of the changeset description as the subject text.
+ The message contains two or three parts. First, the changeset
+ description. Next, (optionally) if the diffstat program is
+ installed and -d/--diffstat is used, the result of running
+ diffstat on the patch. Finally, the patch itself, as generated by
+ "hg export".
+
+ By default the patch is included as text in the email body for
+ easy reviewing. Using the -a/--attach option will instead create
+ an attachment for the patch. With -i/--inline an inline attachment
+ will be created.
+
+ With -o/--outgoing, emails will be generated for patches not found
+    in the destination repository (or only those which are ancestors
+    of the specified revisions, if any are provided).
+
+ With -b/--bundle, changesets are selected as for --outgoing, but a
+ single email containing a binary Mercurial bundle as an attachment
+ will be sent.
+
+ Examples::
+
+ hg email -r 3000 # send patch 3000 only
+ hg email -r 3000 -r 3001 # send patches 3000 and 3001
+ hg email -r 3000:3005 # send patches 3000 through 3005
+ hg email 3000 # send patch 3000 (deprecated)
+
+ hg email -o # send all patches not in default
+ hg email -o DEST # send all patches not in DEST
+ hg email -o -r 3000 # send all ancestors of 3000 not in default
+ hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
+
+ hg email -b # send bundle of all patches not in default
+ hg email -b DEST # send bundle of all patches not in DEST
+ hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
+ hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
+
+ Before using this command, you will need to enable email in your
+ hgrc. See the [email] section in hgrc(5) for details.
+ '''
+
+ _charsets = mail._charsets(ui)
+
+ def outgoing(dest, revs):
+ '''Return the revisions present locally but not in dest'''
+ dest = ui.expandpath(dest or 'default-push', dest or 'default')
+ revs = [repo.lookup(rev) for rev in revs]
+ other = hg.repository(cmdutil.remoteui(repo, opts), dest)
+ ui.status(_('comparing with %s\n') % dest)
+ o = repo.findoutgoing(other)
+ if not o:
+ ui.status(_("no changes found\n"))
+ return []
+ o = repo.changelog.nodesbetween(o, revs or None)[0]
+ return [str(repo.changelog.rev(r)) for r in o]
+
+ def getpatches(revs):
+ for r in cmdutil.revrange(repo, revs):
+ output = cStringIO.StringIO()
+ patch.export(repo, [r], fp=output,
+ opts=patch.diffopts(ui, opts))
+ yield output.getvalue().split('\n')
+
+ def getbundle(dest):
+ tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
+ tmpfn = os.path.join(tmpdir, 'bundle')
+ try:
+ commands.bundle(ui, repo, tmpfn, dest, **opts)
+ return open(tmpfn, 'rb').read()
+ finally:
+ try:
+ os.unlink(tmpfn)
+            except OSError:
+ pass
+ os.rmdir(tmpdir)
+
+ if not (opts.get('test') or opts.get('mbox')):
+ # really sending
+ mail.validateconfig(ui)
+
+ if not (revs or opts.get('rev')
+ or opts.get('outgoing') or opts.get('bundle')
+ or opts.get('patches')):
+ raise util.Abort(_('specify at least one changeset with -r or -o'))
+
+ if opts.get('outgoing') and opts.get('bundle'):
+ raise util.Abort(_("--outgoing mode always on with --bundle;"
+ " do not re-specify --outgoing"))
+
+ if opts.get('outgoing') or opts.get('bundle'):
+ if len(revs) > 1:
+ raise util.Abort(_("too many destinations"))
+ dest = revs and revs[0] or None
+ revs = []
+
+ if opts.get('rev'):
+ if revs:
+ raise util.Abort(_('use only one form to specify the revision'))
+ revs = opts.get('rev')
+
+ if opts.get('outgoing'):
+ revs = outgoing(dest, opts.get('rev'))
+ if opts.get('bundle'):
+ opts['revs'] = revs
+
+ # start
+ if opts.get('date'):
+ start_time = util.parsedate(opts.get('date'))
+ else:
+ start_time = util.makedate()
+
+ def genmsgid(id):
+ return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
+
+ def getdescription(body, sender):
+ if opts.get('desc'):
+ body = open(opts.get('desc')).read()
+ else:
+ ui.write(_('\nWrite the introductory message for the '
+ 'patch series.\n\n'))
+ body = ui.edit(body, sender)
+ return body
+
+ def getpatchmsgs(patches, patchnames=None):
+ jumbo = []
+ msgs = []
+
+ ui.write(_('This patch series consists of %d patches.\n\n')
+ % len(patches))
+
+ name = None
+ for i, p in enumerate(patches):
+ jumbo.extend(p)
+ if patchnames:
+ name = patchnames[i]
+ msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
+ len(patches), name)
+ msgs.append(msg)
+
+ if len(patches) > 1 or opts.get('intro'):
+ tlen = len(str(len(patches)))
+
+ flag = ' '.join(opts.get('flag'))
+ if flag:
+ subj = '[PATCH %0*d of %d %s] ' % (tlen, 0, len(patches), flag)
+ else:
+ subj = '[PATCH %0*d of %d] ' % (tlen, 0, len(patches))
+ subj += opts.get('subject') or prompt(ui, 'Subject:', rest=subj,
+ default='None')
+
+ body = ''
+ if opts.get('diffstat'):
+ d = cdiffstat(ui, _('Final summary:\n'), jumbo)
+ if d:
+ body = '\n' + d
+
+ body = getdescription(body, sender)
+ msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
+ msg['Subject'] = mail.headencode(ui, subj, _charsets,
+ opts.get('test'))
+
+ msgs.insert(0, (msg, subj))
+ return msgs
+
+ def getbundlemsgs(bundle):
+ subj = (opts.get('subject')
+ or prompt(ui, 'Subject:', 'A bundle for your repository'))
+
+ body = getdescription('', sender)
+ msg = email.MIMEMultipart.MIMEMultipart()
+ if body:
+ msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
+ datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
+ datapart.set_payload(bundle)
+ bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
+ datapart.add_header('Content-Disposition', 'attachment',
+ filename=bundlename)
+ email.Encoders.encode_base64(datapart)
+ msg.attach(datapart)
+ msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
+ return [(msg, subj)]
+
+ sender = (opts.get('from') or ui.config('email', 'from') or
+ ui.config('patchbomb', 'from') or
+ prompt(ui, 'From', ui.username()))
+
+ # internal option used by pbranches
+ patches = opts.get('patches')
+ if patches:
+ msgs = getpatchmsgs(patches, opts.get('patchnames'))
+ elif opts.get('bundle'):
+ msgs = getbundlemsgs(getbundle(dest))
+ else:
+ msgs = getpatchmsgs(list(getpatches(revs)))
+
+    def getaddrs(opt, prpt, default=None):
+ addrs = opts.get(opt) or (ui.config('email', opt) or
+ ui.config('patchbomb', opt) or
+ prompt(ui, prpt, default)).split(',')
+ return [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
+ for a in addrs if a.strip()]
+
+ to = getaddrs('to', 'To')
+ cc = getaddrs('cc', 'Cc', '')
+
+ bcc = opts.get('bcc') or (ui.config('email', 'bcc') or
+ ui.config('patchbomb', 'bcc') or '').split(',')
+ bcc = [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
+ for a in bcc if a.strip()]
+
+ ui.write('\n')
+
+ parent = opts.get('in_reply_to') or None
+ # angle brackets may be omitted, they're not semantically part of the msg-id
+ if parent is not None:
+ if not parent.startswith('<'):
+ parent = '<' + parent
+ if not parent.endswith('>'):
+ parent += '>'
+
+ first = True
+
+ sender_addr = email.Utils.parseaddr(sender)[1]
+ sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
+ sendmail = None
+ for m, subj in msgs:
+ try:
+ m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
+ except TypeError:
+ m['Message-Id'] = genmsgid('patchbomb')
+ if parent:
+ m['In-Reply-To'] = parent
+ m['References'] = parent
+ if first:
+ parent = m['Message-Id']
+ first = False
+
+ m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
+ m['Date'] = email.Utils.formatdate(start_time[0], localtime=True)
+
+ start_time = (start_time[0] + 1, start_time[1])
+ m['From'] = sender
+ m['To'] = ', '.join(to)
+ if cc:
+ m['Cc'] = ', '.join(cc)
+ if bcc:
+ m['Bcc'] = ', '.join(bcc)
+ if opts.get('test'):
+ ui.status(_('Displaying '), subj, ' ...\n')
+ ui.flush()
+ if 'PAGER' in os.environ:
+ fp = util.popen(os.environ['PAGER'], 'w')
+ else:
+ fp = ui
+ generator = email.Generator.Generator(fp, mangle_from_=False)
+ try:
+ generator.flatten(m, 0)
+ fp.write('\n')
+ except IOError, inst:
+ if inst.errno != errno.EPIPE:
+ raise
+ if fp is not ui:
+ fp.close()
+ elif opts.get('mbox'):
+ ui.status(_('Writing '), subj, ' ...\n')
+ fp = open(opts.get('mbox'), 'In-Reply-To' in m and 'ab+' or 'wb+')
+ generator = email.Generator.Generator(fp, mangle_from_=True)
+ date = time.ctime(start_time[0])
+ fp.write('From %s %s\n' % (sender_addr, date))
+ generator.flatten(m, 0)
+ fp.write('\n\n')
+ fp.close()
+ else:
+ if not sendmail:
+ sendmail = mail.connect(ui)
+ ui.status(_('Sending '), subj, ' ...\n')
+ # Exim does not remove the Bcc field
+ del m['Bcc']
+ fp = cStringIO.StringIO()
+ generator = email.Generator.Generator(fp, mangle_from_=False)
+ generator.flatten(m, 0)
+ sendmail(sender, to + bcc + cc, fp.getvalue())
+
+emailopts = [
+ ('a', 'attach', None, _('send patches as attachments')),
+ ('i', 'inline', None, _('send patches as inline attachments')),
+ ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
+ ('c', 'cc', [], _('email addresses of copy recipients')),
+ ('d', 'diffstat', None, _('add diffstat output to messages')),
+ ('', 'date', '', _('use the given date as the sending date')),
+ ('', 'desc', '', _('use the given file as the series description')),
+ ('f', 'from', '', _('email address of sender')),
+ ('n', 'test', None, _('print messages that would be sent')),
+ ('m', 'mbox', '',
+ _('write messages to mbox file instead of sending them')),
+ ('s', 'subject', '',
+ _('subject of first message (intro or single patch)')),
+ ('', 'in-reply-to', '',
+ _('message identifier to reply to')),
+ ('', 'flag', [], _('flags to add in subject prefixes')),
+ ('t', 'to', [], _('email addresses of recipients')),
+ ]
+
+
+cmdtable = {
+ "email":
+ (patchbomb,
+ [('g', 'git', None, _('use git extended diff format')),
+ ('', 'plain', None, _('omit hg patch header')),
+ ('o', 'outgoing', None,
+ _('send changes not found in the target repository')),
+ ('b', 'bundle', None,
+ _('send changes not in target as a binary bundle')),
+ ('', 'bundlename', 'bundle',
+ _('name of the bundle attachment file')),
+ ('r', 'rev', [], _('a revision to send')),
+ ('', 'force', None,
+ _('run even when remote repository is unrelated '
+ '(with -b/--bundle)')),
+ ('', 'base', [],
+ _('a base changeset to specify instead of a destination '
+ '(with -b/--bundle)')),
+ ('', 'intro', None,
+ _('send an introduction email for a single patch')),
+ ] + emailopts + commands.remoteopts,
+ _('hg email [OPTION]... [DEST]...'))
+}
diff --git a/sys/src/cmd/hg/hgext/purge.py b/sys/src/cmd/hg/hgext/purge.py
new file mode 100644
index 000000000..3946ad0f5
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/purge.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2006 - Marco Barisione <marco@barisione.org>
+#
+# This is a small extension for Mercurial (http://mercurial.selenic.com/)
+# that removes files not known to mercurial
+#
+# This program was inspired by the "cvspurge" script contained in CVS
+# utilities (http://www.red-bean.com/cvsutils/).
+#
+# For help on the usage of "hg purge" use:
+# hg help purge
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+'''command to delete untracked files from the working directory'''
+
+from mercurial import util, commands, cmdutil
+from mercurial.i18n import _
+import os, stat
+
+def purge(ui, repo, *dirs, **opts):
+ '''removes files not tracked by Mercurial
+
+ Delete files not known to Mercurial. This is useful to test local
+ and uncommitted changes in an otherwise-clean source tree.
+
+ This means that purge will delete:
+
+ - Unknown files: files marked with "?" by "hg status"
+ - Empty directories: in fact Mercurial ignores directories unless
+ they contain files under source control management
+
+ But it will leave untouched:
+
+ - Modified and unmodified tracked files
+ - Ignored files (unless --all is specified)
+ - New files added to the repository (with "hg add")
+
+ If directories are given on the command line, only files in these
+ directories are considered.
+
+ Be careful with purge, as you could irreversibly delete some files
+ you forgot to add to the repository. If you only want to print the
+ list of files that this program would delete, use the --print
+ option.
+ '''
+ act = not opts['print']
+ eol = '\n'
+ if opts['print0']:
+ eol = '\0'
+ act = False # --print0 implies --print
+
+ def remove(remove_func, name):
+ if act:
+ try:
+ remove_func(repo.wjoin(name))
+ except OSError:
+ m = _('%s cannot be removed') % name
+ if opts['abort_on_err']:
+ raise util.Abort(m)
+ ui.warn(_('warning: %s\n') % m)
+ else:
+ ui.write('%s%s' % (name, eol))
+
+ def removefile(path):
+ try:
+ os.remove(path)
+ except OSError:
+ # read-only files cannot be unlinked under Windows
+ s = os.stat(path)
+ if (s.st_mode & stat.S_IWRITE) != 0:
+ raise
+ os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
+ os.remove(path)
+
+ directories = []
+ match = cmdutil.match(repo, dirs, opts)
+ match.dir = directories.append
+ status = repo.status(match=match, ignored=opts['all'], unknown=True)
+
+ for f in sorted(status[4] + status[5]):
+ ui.note(_('Removing file %s\n') % f)
+ remove(removefile, f)
+
+ for f in sorted(directories, reverse=True):
+ if match(f) and not os.listdir(repo.wjoin(f)):
+ ui.note(_('Removing directory %s\n') % f)
+ remove(os.rmdir, f)
+
+cmdtable = {
+ 'purge|clean':
+ (purge,
+ [('a', 'abort-on-err', None, _('abort if an error occurs')),
+ ('', 'all', None, _('purge ignored files too')),
+ ('p', 'print', None, _('print filenames instead of deleting them')),
+ ('0', 'print0', None, _('end filenames with NUL, for use with xargs'
+ ' (implies -p/--print)')),
+ ] + commands.walkopts,
+ _('hg purge [OPTION]... [DIR]...'))
+}
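+
+# Usage sketch (assuming the extension is enabled in hgrc):
+#
+#   [extensions]
+#   hgext.purge =
+#
+#   $ hg purge -p        # only print what would be deleted
+#   $ hg purge --all     # also delete ignored files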
diff --git a/sys/src/cmd/hg/hgext/rebase.py b/sys/src/cmd/hg/hgext/rebase.py
new file mode 100644
index 000000000..a1d030087
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/rebase.py
@@ -0,0 +1,471 @@
+# rebase.py - rebasing feature for mercurial
+#
+# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''command to move sets of revisions to a different ancestor
+
+This extension lets you rebase changesets in an existing Mercurial
+repository.
+
+For more information:
+http://mercurial.selenic.com/wiki/RebaseExtension
+'''
+
+from mercurial import util, repair, merge, cmdutil, commands, error
+from mercurial import extensions, ancestor, copies, patch
+from mercurial.commands import templateopts
+from mercurial.node import nullrev
+from mercurial.lock import release
+from mercurial.i18n import _
+import os, errno
+
+def rebasemerge(repo, rev, first=False):
+ 'return the correct ancestor'
+ oldancestor = ancestor.ancestor
+
+ def newancestor(a, b, pfunc):
+ ancestor.ancestor = oldancestor
+ if b == rev:
+ return repo[rev].parents()[0].rev()
+ return ancestor.ancestor(a, b, pfunc)
+
+ if not first:
+ ancestor.ancestor = newancestor
+ else:
+ repo.ui.debug(_("first revision, do not change ancestor\n"))
+ stats = merge.update(repo, rev, True, True, False)
+ return stats
+
+def rebase(ui, repo, **opts):
+ """move changeset (and descendants) to a different branch
+
+ Rebase uses repeated merging to graft changesets from one part of
+ history onto another. This can be useful for linearizing local
+ changes relative to a master development tree.
+
+ If a rebase is interrupted to manually resolve a merge, it can be
+ continued with --continue/-c or aborted with --abort/-a.
+ """
+ originalwd = target = None
+ external = nullrev
+ state = {}
+ skipped = set()
+
+ lock = wlock = None
+ try:
+ lock = repo.lock()
+ wlock = repo.wlock()
+
+ # Validate input and define rebasing points
+ destf = opts.get('dest', None)
+ srcf = opts.get('source', None)
+ basef = opts.get('base', None)
+ contf = opts.get('continue')
+ abortf = opts.get('abort')
+ collapsef = opts.get('collapse', False)
+ extrafn = opts.get('extrafn')
+ keepf = opts.get('keep', False)
+ keepbranchesf = opts.get('keepbranches', False)
+
+ if contf or abortf:
+ if contf and abortf:
+ raise error.ParseError('rebase',
+ _('cannot use both abort and continue'))
+ if collapsef:
+ raise error.ParseError(
+ 'rebase', _('cannot use collapse with continue or abort'))
+
+ if srcf or basef or destf:
+ raise error.ParseError('rebase',
+ _('abort and continue do not allow specifying revisions'))
+
+ (originalwd, target, state, collapsef, keepf,
+ keepbranchesf, external) = restorestatus(repo)
+ if abortf:
+ abort(repo, originalwd, target, state)
+ return
+ else:
+ if srcf and basef:
+ raise error.ParseError('rebase', _('cannot specify both a '
+ 'revision and a base'))
+ cmdutil.bail_if_changed(repo)
+ result = buildstate(repo, destf, srcf, basef, collapsef)
+ if result:
+ originalwd, target, state, external = result
+ else: # Empty state built, nothing to rebase
+ ui.status(_('nothing to rebase\n'))
+ return
+
+ if keepbranchesf:
+ if extrafn:
+ raise error.ParseError(
+ 'rebase', _('cannot use both keepbranches and extrafn'))
+ def extrafn(ctx, extra):
+ extra['branch'] = ctx.branch()
+
+ # Rebase
+ targetancestors = list(repo.changelog.ancestors(target))
+ targetancestors.append(target)
+
+ for rev in sorted(state):
+ if state[rev] == -1:
+ storestatus(repo, originalwd, target, state, collapsef, keepf,
+ keepbranchesf, external)
+ rebasenode(repo, rev, target, state, skipped, targetancestors,
+ collapsef, extrafn)
+ ui.note(_('rebase merging completed\n'))
+
+ if collapsef:
+ p1, p2 = defineparents(repo, min(state), target,
+ state, targetancestors)
+ concludenode(repo, rev, p1, external, state, collapsef,
+ last=True, skipped=skipped, extrafn=extrafn)
+
+ if 'qtip' in repo.tags():
+ updatemq(repo, state, skipped, **opts)
+
+ if not keepf:
+ # Remove revisions that are no longer useful
+ if set(repo.changelog.descendants(min(state))) - set(state):
+ ui.warn(_("warning: new changesets detected on source branch, "
+ "not stripping\n"))
+ else:
+ repair.strip(ui, repo, repo[min(state)].node(), "strip")
+
+ clearstatus(repo)
+ ui.status(_("rebase completed\n"))
+ if os.path.exists(repo.sjoin('undo')):
+ util.unlink(repo.sjoin('undo'))
+ if skipped:
+ ui.note(_("%d revisions have been skipped\n") % len(skipped))
+ finally:
+ release(lock, wlock)
+
+def concludenode(repo, rev, p1, p2, state, collapse, last=False, skipped=None,
+ extrafn=None):
+ """Skip commit if collapsing has been required and rev is not the last
+ revision, commit otherwise
+ """
+ repo.ui.debug(_(" set parents\n"))
+ if collapse and not last:
+ repo.dirstate.setparents(repo[p1].node())
+ return None
+
+ repo.dirstate.setparents(repo[p1].node(), repo[p2].node())
+
+ if skipped is None:
+ skipped = set()
+
+ # Commit, record the old nodeid
+ newrev = nullrev
+ try:
+ if last:
+ # we don't translate commit messages
+ commitmsg = 'Collapsed revision'
+ for rebased in state:
+ if rebased not in skipped:
+ commitmsg += '\n* %s' % repo[rebased].description()
+ commitmsg = repo.ui.edit(commitmsg, repo.ui.username())
+ else:
+ commitmsg = repo[rev].description()
+ # Commit might fail if unresolved files exist
+ extra = {'rebase_source': repo[rev].hex()}
+ if extrafn:
+ extrafn(repo[rev], extra)
+ newrev = repo.commit(text=commitmsg, user=repo[rev].user(),
+ date=repo[rev].date(), extra=extra)
+ repo.dirstate.setbranch(repo[newrev].branch())
+ return newrev
+ except util.Abort:
+ # Invalidate the previous setparents
+ repo.dirstate.invalidate()
+ raise
+
+def rebasenode(repo, rev, target, state, skipped, targetancestors, collapse,
+ extrafn):
+ 'Rebase a single revision'
+ repo.ui.debug(_("rebasing %d:%s\n") % (rev, repo[rev]))
+
+ p1, p2 = defineparents(repo, rev, target, state, targetancestors)
+
+ repo.ui.debug(_(" future parents are %d and %d\n") % (repo[p1].rev(),
+ repo[p2].rev()))
+
+ # Merge phase
+ if len(repo.parents()) != 2:
+ # Update to target and merge it with local
+ if repo['.'].rev() != repo[p1].rev():
+ repo.ui.debug(_(" update to %d:%s\n") % (repo[p1].rev(), repo[p1]))
+ merge.update(repo, p1, False, True, False)
+ else:
+ repo.ui.debug(_(" already in target\n"))
+ repo.dirstate.write()
+ repo.ui.debug(_(" merge against %d:%s\n") % (repo[rev].rev(), repo[rev]))
+ first = repo[rev].rev() == repo[min(state)].rev()
+ stats = rebasemerge(repo, rev, first)
+
+ if stats[3] > 0:
+ raise util.Abort(_('fix unresolved conflicts with hg resolve then '
+ 'run hg rebase --continue'))
+ else: # we have an interrupted rebase
+ repo.ui.debug(_('resuming interrupted rebase\n'))
+
+ # Keep track of renamed files in the revision that is going to be rebased
+ # Here we simulate the copies and renames in the source changeset
+ cop, diver = copies.copies(repo, repo[rev], repo[target], repo[p2], True)
+ m1 = repo[rev].manifest()
+ m2 = repo[target].manifest()
+ for k, v in cop.iteritems():
+ if k in m1:
+ if v in m1 or v in m2:
+ repo.dirstate.copy(v, k)
+ if v in m2 and v not in m1:
+ repo.dirstate.remove(v)
+
+ newrev = concludenode(repo, rev, p1, p2, state, collapse,
+ extrafn=extrafn)
+
+ # Update the state
+ if newrev is not None:
+ state[rev] = repo[newrev].rev()
+ else:
+ if not collapse:
+ repo.ui.note(_('no changes, revision %d skipped\n') % rev)
+ repo.ui.debug(_('next revision set to %s\n') % p1)
+ skipped.add(rev)
+ state[rev] = p1
+
+def defineparents(repo, rev, target, state, targetancestors):
+ 'Return the new parent relationship of the revision that will be rebased'
+ parents = repo[rev].parents()
+ p1 = p2 = nullrev
+
+ P1n = parents[0].rev()
+ if P1n in targetancestors:
+ p1 = target
+ elif P1n in state:
+ p1 = state[P1n]
+ else: # P1n external
+ p1 = target
+ p2 = P1n
+
+ if len(parents) == 2 and parents[1].rev() not in targetancestors:
+ P2n = parents[1].rev()
+ # interesting second parent
+ if P2n in state:
+ if p1 == target: # P1n in targetancestors or external
+ p1 = state[P2n]
+ else:
+ p2 = state[P2n]
+ else: # P2n external
+ if p2 != nullrev: # P1n external too => rev is a merged revision
+ raise util.Abort(_('cannot use revision %d as base, result '
+ 'would have 3 parents') % rev)
+ p2 = P2n
+ return p1, p2
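+
+# For example: a revision whose first parent is already the target or one
+# of its ancestors is rebased directly onto the target; an external
+# (unrebased) first parent instead becomes p2 so its changes are merged in.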
+
+def isagitpatch(repo, patchname):
+ 'Return true if the given patch is in git format'
+ mqpatch = os.path.join(repo.mq.path, patchname)
+ for line in patch.linereader(file(mqpatch, 'rb')):
+ if line.startswith('diff --git'):
+ return True
+ return False
+
+def updatemq(repo, state, skipped, **opts):
+ 'Update rebased mq patches - finalize and then import them'
+ mqrebase = {}
+ for p in repo.mq.applied:
+ if repo[p.rev].rev() in state:
+ repo.ui.debug(_('revision %d is an mq patch (%s), finalize it.\n') %
+ (repo[p.rev].rev(), p.name))
+ mqrebase[repo[p.rev].rev()] = (p.name, isagitpatch(repo, p.name))
+
+ if mqrebase:
+ repo.mq.finish(repo, mqrebase.keys())
+
+ # We must start the import from the newest revision
+ for rev in sorted(mqrebase, reverse=True):
+ if rev not in skipped:
+ repo.ui.debug(_('import mq patch %d (%s)\n')
+ % (state[rev], mqrebase[rev][0]))
+ repo.mq.qimport(repo, (), patchname=mqrebase[rev][0],
+ git=mqrebase[rev][1], rev=[str(state[rev])])
+ repo.mq.save_dirty()
+
+def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
+ external):
+ 'Store the current status to allow recovery'
+ f = repo.opener("rebasestate", "w")
+ f.write(repo[originalwd].hex() + '\n')
+ f.write(repo[target].hex() + '\n')
+ f.write(repo[external].hex() + '\n')
+ f.write('%d\n' % int(collapse))
+ f.write('%d\n' % int(keep))
+ f.write('%d\n' % int(keepbranches))
+ for d, v in state.iteritems():
+ oldrev = repo[d].hex()
+ newrev = repo[v].hex()
+ f.write("%s:%s\n" % (oldrev, newrev))
+ f.close()
+ repo.ui.debug(_('rebase status stored\n'))
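+
+# Layout of the rebasestate file written above, one item per line:
+#   line 1: original working dir node (hex)
+#   line 2: target node    line 3: external node
+#   lines 4-6: collapse / keep / keepbranches flags as 0 or 1
+#   remaining lines: oldrev:newrev hex pairs, one per rebased changeset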
+
+def clearstatus(repo):
+ 'Remove the status files'
+ if os.path.exists(repo.join("rebasestate")):
+ util.unlink(repo.join("rebasestate"))
+
+def restorestatus(repo):
+ 'Restore a previously stored status'
+ try:
+ target = None
+ collapse = False
+ external = nullrev
+ state = {}
+ f = repo.opener("rebasestate")
+ for i, l in enumerate(f.read().splitlines()):
+ if i == 0:
+ originalwd = repo[l].rev()
+ elif i == 1:
+ target = repo[l].rev()
+ elif i == 2:
+ external = repo[l].rev()
+ elif i == 3:
+ collapse = bool(int(l))
+ elif i == 4:
+ keep = bool(int(l))
+ elif i == 5:
+ keepbranches = bool(int(l))
+ else:
+ oldrev, newrev = l.split(':')
+ state[repo[oldrev].rev()] = repo[newrev].rev()
+ repo.ui.debug(_('rebase status resumed\n'))
+ return originalwd, target, state, collapse, keep, keepbranches, external
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ raise util.Abort(_('no rebase in progress'))
+
+def abort(repo, originalwd, target, state):
+ 'Restore the repository to its original state'
+ if set(repo.changelog.descendants(target)) - set(state.values()):
+ repo.ui.warn(_("warning: new changesets detected on target branch, "
+ "not stripping\n"))
+ else:
+ # Strip from the first rebased revision
+ merge.update(repo, repo[originalwd].rev(), False, True, False)
+ rebased = filter(lambda x: x > -1, state.values())
+ if rebased:
+ strippoint = min(rebased)
+ repair.strip(repo.ui, repo, repo[strippoint].node(), "strip")
+ clearstatus(repo)
+ repo.ui.status(_('rebase aborted\n'))
+
+def buildstate(repo, dest, src, base, collapse):
+ 'Define which revisions are going to be rebased and where'
+ targetancestors = set()
+
+ if not dest:
+ # Destination defaults to the latest revision in the current branch
+ branch = repo[None].branch()
+ dest = repo[branch].rev()
+ else:
+ if 'qtip' in repo.tags() and (repo[dest].hex() in
+ [s.rev for s in repo.mq.applied]):
+ raise util.Abort(_('cannot rebase onto an applied mq patch'))
+ dest = repo[dest].rev()
+
+ if src:
+ commonbase = repo[src].ancestor(repo[dest])
+ if commonbase == repo[src]:
+ raise util.Abort(_('cannot rebase an ancestor'))
+ if commonbase == repo[dest]:
+ raise util.Abort(_('cannot rebase a descendant'))
+ source = repo[src].rev()
+ else:
+ if base:
+ cwd = repo[base].rev()
+ else:
+ cwd = repo['.'].rev()
+
+ if cwd == dest:
+ repo.ui.debug(_('already working on current\n'))
+ return None
+
+ targetancestors = set(repo.changelog.ancestors(dest))
+ if cwd in targetancestors:
+ repo.ui.debug(_('already working on the current branch\n'))
+ return None
+
+ cwdancestors = set(repo.changelog.ancestors(cwd))
+ cwdancestors.add(cwd)
+ rebasingbranch = cwdancestors - targetancestors
+ source = min(rebasingbranch)
+
+ repo.ui.debug(_('rebase onto %d starting from %d\n') % (dest, source))
+ state = dict.fromkeys(repo.changelog.descendants(source), nullrev)
+ external = nullrev
+ if collapse:
+ if not targetancestors:
+ targetancestors = set(repo.changelog.ancestors(dest))
+ for rev in state:
+ # Check externals and fail if there are more than one
+ for p in repo[rev].parents():
+ if (p.rev() not in state and p.rev() != source
+ and p.rev() not in targetancestors):
+ if external != nullrev:
+ raise util.Abort(_('unable to collapse, there is more '
+ 'than one external parent'))
+ external = p.rev()
+
+ state[source] = nullrev
+ return repo['.'].rev(), repo[dest].rev(), state, external
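+
+# Note: buildstate seeds every descendant of the source with nullrev (-1);
+# rebasenode() later replaces each entry with the new revision number, or
+# with its new parent when the revision is skipped.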
+
+def pullrebase(orig, ui, repo, *args, **opts):
+ 'Call rebase after pull if the latter has been invoked with --rebase'
+ if opts.get('rebase'):
+ if opts.get('update'):
+ del opts['update']
+ ui.debug(_('--update and --rebase are not compatible, ignoring '
+ 'the update flag\n'))
+
+ cmdutil.bail_if_changed(repo)
+ revsprepull = len(repo)
+ orig(ui, repo, *args, **opts)
+ revspostpull = len(repo)
+ if revspostpull > revsprepull:
+ rebase(ui, repo, **opts)
+ branch = repo[None].branch()
+ dest = repo[branch].rev()
+ if dest != repo['.'].rev():
+ # there was nothing to rebase, so we force an update
+ merge.update(repo, dest, False, False, False)
+ else:
+ orig(ui, repo, *args, **opts)
+
+def uisetup(ui):
+ 'Replace pull with a decorator to provide --rebase option'
+ entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
+ entry[1].append(('', 'rebase', None,
+ _("rebase working directory to branch head")))
+
+cmdtable = {
+"rebase":
+ (rebase,
+ [
+ ('s', 'source', '', _('rebase from a given revision')),
+ ('b', 'base', '', _('rebase from the base of a given revision')),
+ ('d', 'dest', '', _('rebase onto a given revision')),
+ ('', 'collapse', False, _('collapse the rebased revisions')),
+ ('', 'keep', False, _('keep original revisions')),
+ ('', 'keepbranches', False, _('keep original branches')),
+ ('c', 'continue', False, _('continue an interrupted rebase')),
+ ('a', 'abort', False, _('abort an interrupted rebase')),] +
+ templateopts,
+ _('hg rebase [-s REV | -b REV] [-d REV] [--collapse] [--keep] '
+ '[--keepbranches] | [-c] | [-a]')),
+}
diff --git a/sys/src/cmd/hg/hgext/record.py b/sys/src/cmd/hg/hgext/record.py
new file mode 100644
index 000000000..71a4f13c7
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/record.py
@@ -0,0 +1,551 @@
+# record.py
+#
+# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''commands to interactively select changes for commit/qrefresh'''
+
+from mercurial.i18n import gettext, _
+from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
+from mercurial import util
+import copy, cStringIO, errno, operator, os, re, tempfile
+
+lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
+
+def scanpatch(fp):
+ """like patch.iterhunks, but yield different events
+
+ - ('file', [header_lines + fromfile + tofile])
+ - ('context', [context_lines])
+ - ('hunk', [hunk_lines])
+ - ('range', (-start,len, +start,len, diffp))
+ """
+ lr = patch.linereader(fp)
+
+ def scanwhile(first, p):
+ """scan lr while predicate holds"""
+ lines = [first]
+ while True:
+ line = lr.readline()
+ if not line:
+ break
+ if p(line):
+ lines.append(line)
+ else:
+ lr.push(line)
+ break
+ return lines
+
+ while True:
+ line = lr.readline()
+ if not line:
+ break
+ if line.startswith('diff --git a/'):
+ def notheader(line):
+ s = line.split(None, 1)
+ return not s or s[0] not in ('---', 'diff')
+ header = scanwhile(line, notheader)
+ fromfile = lr.readline()
+ if fromfile.startswith('---'):
+ tofile = lr.readline()
+ header += [fromfile, tofile]
+ else:
+ lr.push(fromfile)
+ yield 'file', header
+ elif line[0] == ' ':
+ yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
+ elif line[0] in '-+':
+ yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
+ else:
+ m = lines_re.match(line)
+ if m:
+ yield 'range', m.groups()
+ else:
+ raise patch.PatchError('unknown patch content: %r' % line)
+
+class header(object):
+ """patch header
+
+ XXX shouldn't we move this to mercurial/patch.py?
+ """
+ diff_re = re.compile('diff --git a/(.*) b/(.*)$')
+ allhunks_re = re.compile('(?:index|new file|deleted file) ')
+ pretty_re = re.compile('(?:new file|deleted file) ')
+ special_re = re.compile('(?:index|new|deleted|copy|rename) ')
+
+ def __init__(self, header):
+ self.header = header
+ self.hunks = []
+
+ def binary(self):
+ for h in self.header:
+ if h.startswith('index '):
+ return True
+
+ def pretty(self, fp):
+ for h in self.header:
+ if h.startswith('index '):
+ fp.write(_('this modifies a binary file (all or nothing)\n'))
+ break
+ if self.pretty_re.match(h):
+ fp.write(h)
+ if self.binary():
+ fp.write(_('this is a binary file\n'))
+ break
+ if h.startswith('---'):
+ fp.write(_('%d hunks, %d lines changed\n') %
+ (len(self.hunks),
+ sum([h.added + h.removed for h in self.hunks])))
+ break
+ fp.write(h)
+
+ def write(self, fp):
+ fp.write(''.join(self.header))
+
+ def allhunks(self):
+ for h in self.header:
+ if self.allhunks_re.match(h):
+ return True
+
+ def files(self):
+ fromfile, tofile = self.diff_re.match(self.header[0]).groups()
+ if fromfile == tofile:
+ return [fromfile]
+ return [fromfile, tofile]
+
+ def filename(self):
+ return self.files()[-1]
+
+ def __repr__(self):
+ return '<header %s>' % (' '.join(map(repr, self.files())))
+
+ def special(self):
+ for h in self.header:
+ if self.special_re.match(h):
+ return True
+
+def countchanges(hunk):
+ """hunk -> (n+,n-)"""
+ add = len([h for h in hunk if h[0] == '+'])
+ rem = len([h for h in hunk if h[0] == '-'])
+ return add, rem
+
+class hunk(object):
+ """patch hunk
+
+ XXX shouldn't we merge this with patch.hunk ?
+ """
+ maxcontext = 3
+
+ def __init__(self, header, fromline, toline, proc, before, hunk, after):
+ def trimcontext(number, lines):
+ delta = len(lines) - self.maxcontext
+ if False and delta > 0: # context trimming disabled
+ return number + delta, lines[:self.maxcontext]
+ return number, lines
+
+ self.header = header
+ self.fromline, self.before = trimcontext(fromline, before)
+ self.toline, self.after = trimcontext(toline, after)
+ self.proc = proc
+ self.hunk = hunk
+ self.added, self.removed = countchanges(self.hunk)
+
+ def write(self, fp):
+ delta = len(self.before) + len(self.after)
+ if self.after and self.after[-1] == '\\ No newline at end of file\n':
+ delta -= 1
+ fromlen = delta + self.removed
+ tolen = delta + self.added
+ fp.write('@@ -%d,%d +%d,%d @@%s\n' %
+ (self.fromline, fromlen, self.toline, tolen,
+ self.proc and (' ' + self.proc)))
+ fp.write(''.join(self.before + self.hunk + self.after))
+
+ pretty = write
+
+ def filename(self):
+ return self.header.filename()
+
+ def __repr__(self):
+ return '<hunk %r@%d>' % (self.filename(), self.fromline)
+
+def parsepatch(fp):
+ """patch -> [] of hunks """
+ class parser(object):
+ """patch parsing state machine"""
+ def __init__(self):
+ self.fromline = 0
+ self.toline = 0
+ self.proc = ''
+ self.header = None
+ self.context = []
+ self.before = []
+ self.hunk = []
+ self.stream = []
+
+ def addrange(self, (fromstart, fromend, tostart, toend, proc)):
+ self.fromline = int(fromstart)
+ self.toline = int(tostart)
+ self.proc = proc
+
+ def addcontext(self, context):
+ if self.hunk:
+ h = hunk(self.header, self.fromline, self.toline, self.proc,
+ self.before, self.hunk, context)
+ self.header.hunks.append(h)
+ self.stream.append(h)
+ self.fromline += len(self.before) + h.removed
+ self.toline += len(self.before) + h.added
+ self.before = []
+ self.hunk = []
+ self.proc = ''
+ self.context = context
+
+ def addhunk(self, hunk):
+ if self.context:
+ self.before = self.context
+ self.context = []
+ self.hunk = hunk
+
+ def newfile(self, hdr):
+ self.addcontext([])
+ h = header(hdr)
+ self.stream.append(h)
+ self.header = h
+
+ def finished(self):
+ self.addcontext([])
+ return self.stream
+
+ transitions = {
+ 'file': {'context': addcontext,
+ 'file': newfile,
+ 'hunk': addhunk,
+ 'range': addrange},
+ 'context': {'file': newfile,
+ 'hunk': addhunk,
+ 'range': addrange},
+ 'hunk': {'context': addcontext,
+ 'file': newfile,
+ 'range': addrange},
+ 'range': {'context': addcontext,
+ 'hunk': addhunk},
+ }
+
+ p = parser()
+
+ state = 'context'
+ for newstate, data in scanpatch(fp):
+ try:
+ p.transitions[state][newstate](p, data)
+ except KeyError:
+ raise patch.PatchError('unhandled transition: %s -> %s' %
+ (state, newstate))
+ state = newstate
+ return p.finished()
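+
+# Minimal usage sketch (difftext is a hypothetical git-style diff string):
+#
+#   import cStringIO
+#   for item in parsepatch(cStringIO.StringIO(difftext)):
+#       print repr(item)   # <header ...> and <hunk ...> objects in order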
+
+def filterpatch(ui, chunks):
+ """Interactively filter patch chunks into applied-only chunks"""
+ chunks = list(chunks)
+ chunks.reverse()
+ seen = set()
+ def consumefile():
+ """fetch next portion from chunks until a 'header' is seen
+ NB: header == new-file mark
+ """
+ consumed = []
+ while chunks:
+ if isinstance(chunks[-1], header):
+ break
+ else:
+ consumed.append(chunks.pop())
+ return consumed
+
+ resp_all = [None] # these two are changed from inside prompt,
+ resp_file = [None] # so they can't be plain variables
+ applied = {} # 'filename' -> [] of chunks
+ def prompt(query):
+ """prompt query, and process base inputs
+
+ - y/n for the rest of file
+ - y/n for the rest
+ - ? (help)
+ - q (quit)
+
+ Otherwise, the input is returned to the caller.
+ """
+ if resp_all[0] is not None:
+ return resp_all[0]
+ if resp_file[0] is not None:
+ return resp_file[0]
+ while True:
+ resps = _('[Ynsfdaq?]')
+ choices = (_('&Yes, record this change'),
+ _('&No, skip this change'),
+ _('&Skip remaining changes to this file'),
+ _('Record remaining changes to this &file'),
+ _('&Done, skip remaining changes and files'),
+ _('Record &all changes to all remaining files'),
+ _('&Quit, recording no changes'),
+ _('&?'))
+ r = ui.promptchoice("%s %s " % (query, resps), choices)
+ if r == 7: # ?
+ doc = gettext(record.__doc__)
+ c = doc.find(_('y - record this change'))
+ for l in doc[c:].splitlines():
+ if l: ui.write(l.strip(), '\n')
+ continue
+ elif r == 0: # yes
+ ret = 'y'
+ elif r == 1: # no
+ ret = 'n'
+ elif r == 2: # Skip
+ ret = resp_file[0] = 'n'
+ elif r == 3: # file (Record remaining)
+ ret = resp_file[0] = 'y'
+ elif r == 4: # done, skip remaining
+ ret = resp_all[0] = 'n'
+ elif r == 5: # all
+ ret = resp_all[0] = 'y'
+ elif r == 6: # quit
+ raise util.Abort(_('user quit'))
+ return ret
+ pos, total = 0, len(chunks) - 1
+ while chunks:
+ chunk = chunks.pop()
+ if isinstance(chunk, header):
+ # new-file mark
+ resp_file = [None]
+ fixoffset = 0
+ hdr = ''.join(chunk.header)
+ if hdr in seen:
+ consumefile()
+ continue
+ seen.add(hdr)
+ if resp_all[0] is None:
+ chunk.pretty(ui)
+ r = prompt(_('examine changes to %s?') %
+ _(' and ').join(map(repr, chunk.files())))
+ if r == _('y'):
+ applied[chunk.filename()] = [chunk]
+ if chunk.allhunks():
+ applied[chunk.filename()] += consumefile()
+ else:
+ consumefile()
+ else:
+ # new hunk
+ if resp_file[0] is None and resp_all[0] is None:
+ chunk.pretty(ui)
+ r = total == 1 and prompt(_('record this change to %r?') %
+ chunk.filename()) \
+ or prompt(_('record change %d/%d to %r?') %
+ (pos, total, chunk.filename()))
+ if r == _('y'):
+ if fixoffset:
+ chunk = copy.copy(chunk)
+ chunk.toline += fixoffset
+ applied[chunk.filename()].append(chunk)
+ else:
+ fixoffset += chunk.removed - chunk.added
+ pos = pos + 1
+ return reduce(operator.add, [h for h in applied.itervalues()
+ if h[0].special() or len(h) > 1], [])
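+
+# The reduce above flattens the applied chunks back into one list, dropping
+# file groups that are just a bare header with no recorded hunks, unless
+# the header is special (e.g. a rename, copy, or new/deleted file), in
+# which case it must be kept on its own.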
+
+def record(ui, repo, *pats, **opts):
+ '''interactively select changes to commit
+
+ If a list of files is omitted, all changes reported by "hg status"
+ will be candidates for recording.
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+
+ You will be prompted for whether to record changes to each
+ modified file, and for files with multiple changes, for each
+ change to use. For each query, the following responses are
+ possible::
+
+ y - record this change
+ n - skip this change
+
+ s - skip remaining changes to this file
+ f - record remaining changes to this file
+
+ d - done, skip remaining changes and files
+ a - record all changes to all remaining files
+ q - quit, recording no changes
+
+ ? - display help'''
+
+ def record_committer(ui, repo, pats, opts):
+ commands.commit(ui, repo, *pats, **opts)
+
+ dorecord(ui, repo, record_committer, *pats, **opts)
+
+
+def qrecord(ui, repo, patch, *pats, **opts):
+ '''interactively record a new patch
+
+ See 'hg help qnew' & 'hg help record' for more information and
+ usage.
+ '''
+
+ try:
+ mq = extensions.find('mq')
+ except KeyError:
+ raise util.Abort(_("'mq' extension not loaded"))
+
+ def qrecord_committer(ui, repo, pats, opts):
+ mq.new(ui, repo, patch, *pats, **opts)
+
+ opts = opts.copy()
+ opts['force'] = True # always 'qnew -f'
+ dorecord(ui, repo, qrecord_committer, *pats, **opts)
+
+
+def dorecord(ui, repo, committer, *pats, **opts):
+ if not ui.interactive():
+ raise util.Abort(_('running non-interactively, use commit instead'))
+
+ def recordfunc(ui, repo, message, match, opts):
+ """This is generic record driver.
+
+ Its job is to interactively filter local changes, and accordingly
+ prepare working dir into a state, where the job can be delegated to
+ non-interactive commit command such as 'commit' or 'qrefresh'.
+
+ After the actual job is done by non-interactive command, working dir
+ state is restored to original.
+
+ In the end we'll record intresting changes, and everything else will be
+ left in place, so the user can continue his work.
+ """
+
+ changes = repo.status(match=match)[:3]
+ diffopts = mdiff.diffopts(git=True, nodates=True)
+ chunks = patch.diff(repo, changes=changes, opts=diffopts)
+ fp = cStringIO.StringIO()
+ fp.write(''.join(chunks))
+ fp.seek(0)
+
+ # 1. filter patch, so we have the intending-to-apply subset of it
+ chunks = filterpatch(ui, parsepatch(fp))
+ del fp
+
+ contenders = set()
+ for h in chunks:
+ try: contenders.update(set(h.files()))
+ except AttributeError: pass
+
+ changed = changes[0] + changes[1] + changes[2]
+ newfiles = [f for f in changed if f in contenders]
+ if not newfiles:
+ ui.status(_('no changes to record\n'))
+ return 0
+
+ modified = set(changes[0])
+
+ # 2. backup changed files, so we can restore them in the end
+ backups = {}
+ backupdir = repo.join('record-backups')
+ try:
+ os.mkdir(backupdir)
+ except OSError, err:
+ if err.errno != errno.EEXIST:
+ raise
+ try:
+ # backup continues
+ for f in newfiles:
+ if f not in modified:
+ continue
+ fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
+ dir=backupdir)
+ os.close(fd)
+ ui.debug(_('backup %r as %r\n') % (f, tmpname))
+ util.copyfile(repo.wjoin(f), tmpname)
+ backups[f] = tmpname
+
+ fp = cStringIO.StringIO()
+ for c in chunks:
+ if c.filename() in backups:
+ c.write(fp)
+ dopatch = fp.tell()
+ fp.seek(0)
+
+ # 3a. apply filtered patch to clean repo (clean)
+ if backups:
+ hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
+
+ # 3b. (apply)
+ if dopatch:
+ try:
+ ui.debug(_('applying patch\n'))
+ ui.debug(fp.getvalue())
+ pfiles = {}
+ patch.internalpatch(fp, ui, 1, repo.root, files=pfiles,
+ eolmode=None)
+ patch.updatedir(ui, repo, pfiles)
+ except patch.PatchError, err:
+ s = str(err)
+ if s:
+ raise util.Abort(s)
+ else:
+ raise util.Abort(_('patch failed to apply'))
+ del fp
+
+ # 4. We prepared the working directory according to the filtered patch.
+ # Now is the time to delegate the job to commit/qrefresh or the like!
+
+ # it is important to first chdir to repo root -- we'll call a
+ # highlevel command with list of pathnames relative to repo root
+ cwd = os.getcwd()
+ os.chdir(repo.root)
+ try:
+ committer(ui, repo, newfiles, opts)
+ finally:
+ os.chdir(cwd)
+
+ return 0
+ finally:
+ # 5. finally restore backed-up files
+ try:
+ for realname, tmpname in backups.iteritems():
+ ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
+ util.copyfile(tmpname, repo.wjoin(realname))
+ os.unlink(tmpname)
+ os.rmdir(backupdir)
+ except OSError:
+ pass
+ return cmdutil.commit(ui, repo, recordfunc, pats, opts)
+
+cmdtable = {
+ "record":
+ (record,
+
+ # add commit options
+ commands.table['^commit|ci'][1],
+
+ _('hg record [OPTION]... [FILE]...')),
+}
+
+
+def extsetup():
+ try:
+ mq = extensions.find('mq')
+ except KeyError:
+ return
+
+ qcmdtable = {
+ "qrecord":
+ (qrecord,
+
+ # add qnew options, except '--force'
+ [opt for opt in mq.cmdtable['qnew'][1] if opt[1] != 'force'],
+
+ _('hg qrecord [OPTION]... PATCH [FILE]...')),
+ }
+
+ cmdtable.update(qcmdtable)
+
diff --git a/sys/src/cmd/hg/hgext/share.py b/sys/src/cmd/hg/hgext/share.py
new file mode 100644
index 000000000..e714ce0aa
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/share.py
@@ -0,0 +1,30 @@
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''share a common history between several working directories'''
+
+from mercurial.i18n import _
+from mercurial import hg, commands
+
+def share(ui, source, dest=None, noupdate=False):
+ """create a new shared repository (experimental)
+
+ Initialize a new repository and working directory that shares its
+ history with another repository.
+
+ NOTE: actions that change history such as rollback or moving the
+ source may confuse sharers.
+ """
+
+ return hg.share(ui, source, dest, not noupdate)
+
+cmdtable = {
+ "share":
+ (share,
+ [('U', 'noupdate', None, _('do not create a working copy'))],
+ _('[-U] SOURCE [DEST]')),
+}
+
+commands.norepo += " share"
diff --git a/sys/src/cmd/hg/hgext/transplant.py b/sys/src/cmd/hg/hgext/transplant.py
new file mode 100644
index 000000000..1d26c7efd
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/transplant.py
@@ -0,0 +1,606 @@
+# Patch transplanting extension for Mercurial
+#
+# Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''command to transplant changesets from another branch
+
+This extension allows you to transplant patches from another branch.
+
+Transplanted patches are recorded in .hg/transplant/transplants, as a
+map from a changeset hash to its hash in the source repository.
+'''
+
+from mercurial.i18n import _
+import os, tempfile
+from mercurial import bundlerepo, changegroup, cmdutil, hg, merge, match
+from mercurial import patch, revlog, util, error
+
+class transplantentry(object):
+ def __init__(self, lnode, rnode):
+ self.lnode = lnode
+ self.rnode = rnode
+
+class transplants(object):
+ def __init__(self, path=None, transplantfile=None, opener=None):
+ self.path = path
+ self.transplantfile = transplantfile
+ self.opener = opener
+
+ if not opener:
+ self.opener = util.opener(self.path)
+ self.transplants = []
+ self.dirty = False
+ self.read()
+
+ def read(self):
+ abspath = os.path.join(self.path, self.transplantfile)
+ if self.transplantfile and os.path.exists(abspath):
+ for line in self.opener(self.transplantfile).read().splitlines():
+ lnode, rnode = map(revlog.bin, line.split(':'))
+ self.transplants.append(transplantentry(lnode, rnode))
+
+ def write(self):
+ if self.dirty and self.transplantfile:
+ if not os.path.isdir(self.path):
+ os.mkdir(self.path)
+ fp = self.opener(self.transplantfile, 'w')
+ for c in self.transplants:
+ l, r = map(revlog.hex, (c.lnode, c.rnode))
+ fp.write(l + ':' + r + '\n')
+ fp.close()
+ self.dirty = False
+
+ def get(self, rnode):
+ return [t for t in self.transplants if t.rnode == rnode]
+
+ def set(self, lnode, rnode):
+ self.transplants.append(transplantentry(lnode, rnode))
+ self.dirty = True
+
+ def remove(self, transplant):
+ del self.transplants[self.transplants.index(transplant)]
+ self.dirty = True
+
+class transplanter(object):
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.path = repo.join('transplant')
+ self.opener = util.opener(self.path)
+ self.transplants = transplants(self.path, 'transplants',
+ opener=self.opener)
+
+ def applied(self, repo, node, parent):
+ '''returns True if a node is already an ancestor of parent
+ or has already been transplanted'''
+ if hasnode(repo, node):
+ if node in repo.changelog.reachable(parent, stop=node):
+ return True
+ for t in self.transplants.get(node):
+ # it might have been stripped
+ if not hasnode(repo, t.lnode):
+ self.transplants.remove(t)
+ return False
+ if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
+ return True
+ return False
+
+ def apply(self, repo, source, revmap, merges, opts={}):
+ '''apply the revisions in revmap one by one in revision order'''
+ revs = sorted(revmap)
+ p1, p2 = repo.dirstate.parents()
+ pulls = []
+ diffopts = patch.diffopts(self.ui, opts)
+ diffopts.git = True
+
+ lock = wlock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ for rev in revs:
+ node = revmap[rev]
+ revstr = '%s:%s' % (rev, revlog.short(node))
+
+ if self.applied(repo, node, p1):
+ self.ui.warn(_('skipping already applied revision %s\n') %
+ revstr)
+ continue
+
+ parents = source.changelog.parents(node)
+ if not opts.get('filter'):
+ # If the changeset parent is the same as the
+ # wdir's parent, just pull it.
+ if parents[0] == p1:
+ pulls.append(node)
+ p1 = node
+ continue
+ if pulls:
+ if source != repo:
+ repo.pull(source, heads=pulls)
+ merge.update(repo, pulls[-1], False, False, None)
+ p1, p2 = repo.dirstate.parents()
+ pulls = []
+
+ domerge = False
+ if node in merges:
+ # pulling all the merge revs at once would mean we
+ # couldn't transplant after the latest even if
+ # transplants before them fail.
+ domerge = True
+ if not hasnode(repo, node):
+ repo.pull(source, heads=[node])
+
+ if parents[1] != revlog.nullid:
+ self.ui.note(_('skipping merge changeset %s:%s\n')
+ % (rev, revlog.short(node)))
+ patchfile = None
+ else:
+ fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
+ fp = os.fdopen(fd, 'w')
+ gen = patch.diff(source, parents[0], node, opts=diffopts)
+ for chunk in gen:
+ fp.write(chunk)
+ fp.close()
+
+ del revmap[rev]
+ if patchfile or domerge:
+ try:
+ n = self.applyone(repo, node,
+ source.changelog.read(node),
+ patchfile, merge=domerge,
+ log=opts.get('log'),
+ filter=opts.get('filter'))
+ if n and domerge:
+ self.ui.status(_('%s merged at %s\n') % (revstr,
+ revlog.short(n)))
+ elif n:
+ self.ui.status(_('%s transplanted to %s\n')
+ % (revlog.short(node),
+ revlog.short(n)))
+ finally:
+ if patchfile:
+ os.unlink(patchfile)
+ if pulls:
+ repo.pull(source, heads=pulls)
+ merge.update(repo, pulls[-1], False, False, None)
+ finally:
+ self.saveseries(revmap, merges)
+ self.transplants.write()
+ lock.release()
+ wlock.release()
+
+ def filter(self, filter, changelog, patchfile):
+ '''arbitrarily rewrite changeset before applying it'''
+
+ self.ui.status(_('filtering %s\n') % patchfile)
+ user, date, msg = (changelog[1], changelog[2], changelog[4])
+
+ fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
+ fp = os.fdopen(fd, 'w')
+ fp.write("# HG changeset patch\n")
+ fp.write("# User %s\n" % user)
+ fp.write("# Date %d %d\n" % date)
+ fp.write(changelog[4])
+ fp.close()
+
+ try:
+ util.system('%s %s %s' % (filter, util.shellquote(headerfile),
+ util.shellquote(patchfile)),
+ environ={'HGUSER': changelog[1]},
+ onerr=util.Abort, errprefix=_('filter failed'))
+ user, date, msg = self.parselog(file(headerfile))[1:4]
+ finally:
+ os.unlink(headerfile)
+
+ return (user, date, msg)
+
+ def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
+ filter=None):
+ '''apply the patch in patchfile to the repository as a transplant'''
+ (manifest, user, (time, timezone), files, message) = cl[:5]
+ date = "%d %d" % (time, timezone)
+ extra = {'transplant_source': node}
+ if filter:
+ (user, date, message) = self.filter(filter, cl, patchfile)
+
+ if log:
+ # we don't translate messages inserted into commits
+ message += '\n(transplanted from %s)' % revlog.hex(node)
+
+ self.ui.status(_('applying %s\n') % revlog.short(node))
+ self.ui.note('%s %s\n%s\n' % (user, date, message))
+
+ if not patchfile and not merge:
+ raise util.Abort(_('can only omit patchfile if merging'))
+ if patchfile:
+ try:
+ files = {}
+ try:
+ patch.patch(patchfile, self.ui, cwd=repo.root,
+ files=files, eolmode=None)
+ if not files:
+ self.ui.warn(_('%s: empty changeset')
+ % revlog.hex(node))
+ return None
+ finally:
+ files = patch.updatedir(self.ui, repo, files)
+ except Exception, inst:
+ if filter:
+ os.unlink(patchfile)
+ seriespath = os.path.join(self.path, 'series')
+ if os.path.exists(seriespath):
+ os.unlink(seriespath)
+ p1 = repo.dirstate.parents()[0]
+ p2 = node
+ self.log(user, date, message, p1, p2, merge=merge)
+ self.ui.write(str(inst) + '\n')
+ raise util.Abort(_('Fix up the merge and run '
+ 'hg transplant --continue'))
+ else:
+ files = None
+ if merge:
+ p1, p2 = repo.dirstate.parents()
+ repo.dirstate.setparents(p1, node)
+ m = match.always(repo.root, '')
+ else:
+ m = match.exact(repo.root, '', files)
+
+ n = repo.commit(message, user, date, extra=extra, match=m)
+ if not merge:
+ self.transplants.set(n, node)
+
+ return n
+
+ def resume(self, repo, source, opts=None):
+ '''recover last transaction and apply remaining changesets'''
+ if os.path.exists(os.path.join(self.path, 'journal')):
+ n, node = self.recover(repo)
+ self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
+ revlog.short(n)))
+ seriespath = os.path.join(self.path, 'series')
+ if not os.path.exists(seriespath):
+ self.transplants.write()
+ return
+ nodes, merges = self.readseries()
+ revmap = {}
+ for n in nodes:
+ revmap[source.changelog.rev(n)] = n
+ os.unlink(seriespath)
+
+ self.apply(repo, source, revmap, merges, opts)
+
+ def recover(self, repo):
+ '''commit working directory using journal metadata'''
+ node, user, date, message, parents = self.readlog()
+ merge = len(parents) == 2
+
+ if not user or not date or not message or not parents[0]:
+ raise util.Abort(_('transplant log file is corrupt'))
+
+ extra = {'transplant_source': node}
+ wlock = repo.wlock()
+ try:
+ p1, p2 = repo.dirstate.parents()
+ if p1 != parents[0]:
+ raise util.Abort(
+ _('working dir not at transplant parent %s') %
+ revlog.hex(parents[0]))
+ if merge:
+ repo.dirstate.setparents(p1, parents[1])
+ n = repo.commit(message, user, date, extra=extra)
+ if not n:
+ raise util.Abort(_('commit failed'))
+ if not merge:
+ self.transplants.set(n, node)
+ self.unlog()
+
+ return n, node
+ finally:
+ wlock.release()
+
+ def readseries(self):
+ nodes = []
+ merges = []
+ cur = nodes
+ for line in self.opener('series').read().splitlines():
+ if line.startswith('# Merges'):
+ cur = merges
+ continue
+ cur.append(revlog.bin(line))
+
+ return (nodes, merges)
+
+ def saveseries(self, revmap, merges):
+ if not revmap:
+ return
+
+ if not os.path.isdir(self.path):
+ os.mkdir(self.path)
+ series = self.opener('series', 'w')
+ for rev in sorted(revmap):
+ series.write(revlog.hex(revmap[rev]) + '\n')
+ if merges:
+ series.write('# Merges\n')
+ for m in merges:
+ series.write(revlog.hex(m) + '\n')
+ series.close()
+
+ def parselog(self, fp):
+ parents = []
+ message = []
+ node = revlog.nullid
+ inmsg = False
+ for line in fp.read().splitlines():
+ if inmsg:
+ message.append(line)
+ elif line.startswith('# User '):
+ user = line[7:]
+ elif line.startswith('# Date '):
+ date = line[7:]
+ elif line.startswith('# Node ID '):
+ node = revlog.bin(line[10:])
+ elif line.startswith('# Parent '):
+ parents.append(revlog.bin(line[9:]))
+ elif not line.startswith('#'):
+ inmsg = True
+ message.append(line)
+ return (node, user, date, '\n'.join(message), parents)
+
+ def log(self, user, date, message, p1, p2, merge=False):
+ '''journal changelog metadata for later recover'''
+
+ if not os.path.isdir(self.path):
+ os.mkdir(self.path)
+ fp = self.opener('journal', 'w')
+ fp.write('# User %s\n' % user)
+ fp.write('# Date %s\n' % date)
+ fp.write('# Node ID %s\n' % revlog.hex(p2))
+ fp.write('# Parent ' + revlog.hex(p1) + '\n')
+ if merge:
+ fp.write('# Parent ' + revlog.hex(p2) + '\n')
+ fp.write(message.rstrip() + '\n')
+ fp.close()
+
+ def readlog(self):
+ return self.parselog(self.opener('journal'))
+
+ def unlog(self):
+ '''remove changelog journal'''
+ absdst = os.path.join(self.path, 'journal')
+ if os.path.exists(absdst):
+ os.unlink(absdst)
+
+ def transplantfilter(self, repo, source, root):
+ def matchfn(node):
+ if self.applied(repo, node, root):
+ return False
+ if source.changelog.parents(node)[1] != revlog.nullid:
+ return False
+ extra = source.changelog.read(node)[5]
+ cnode = extra.get('transplant_source')
+ if cnode and self.applied(repo, cnode, root):
+ return False
+ return True
+
+ return matchfn
+
+def hasnode(repo, node):
+ try:
+ return repo.changelog.rev(node) is not None
+ except error.RevlogError:
+ return False
+
+def browserevs(ui, repo, nodes, opts):
+ '''interactively transplant changesets'''
+ def browsehelp(ui):
+ ui.write('y: transplant this changeset\n'
+ 'n: skip this changeset\n'
+ 'm: merge at this changeset\n'
+ 'p: show patch\n'
+ 'c: commit selected changesets\n'
+ 'q: cancel transplant\n'
+ '?: show this help\n')
+
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ transplants = []
+ merges = []
+ for node in nodes:
+ displayer.show(repo[node])
+ action = None
+ while not action:
+ action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
+ if action == '?':
+ browsehelp(ui)
+ action = None
+ elif action == 'p':
+ parent = repo.changelog.parents(node)[0]
+ for chunk in patch.diff(repo, parent, node):
+ ui.write(chunk)
+ action = None
+ elif action not in ('y', 'n', 'm', 'c', 'q'):
+ ui.write('no such option\n')
+ action = None
+ if action == 'y':
+ transplants.append(node)
+ elif action == 'm':
+ merges.append(node)
+ elif action == 'c':
+ break
+ elif action == 'q':
+ transplants = ()
+ merges = ()
+ break
+ return (transplants, merges)
+
+def transplant(ui, repo, *revs, **opts):
+ '''transplant changesets from another branch
+
+ Selected changesets will be applied on top of the current working
+ directory with the log of the original changeset. If --log is
+ specified, log messages will have a comment appended of the form::
+
+ (transplanted from CHANGESETHASH)
+
+ You can rewrite the changelog message with the --filter option.
+ Its argument will be invoked with the current changelog message as
+ $1 and the patch as $2.
+
+ If --source/-s is specified, selects changesets from the named
+ repository. If --branch/-b is specified, selects changesets from
+ the branch holding the named revision, up to that revision. If
+ --all/-a is specified, all changesets on the branch will be
+ transplanted, otherwise you will be prompted to select the
+ changesets you want.
+
+ hg transplant --branch REVISION --all will rebase the selected
+ branch (up to the named revision) onto your current working
+ directory.
+
+ You can optionally mark selected transplanted changesets as merge
+ changesets. You will not be prompted to transplant any ancestors
+ of a merged transplant, and you can merge descendants of them
+ normally instead of transplanting them.
+
+ If no merges or revisions are provided, hg transplant will start
+ an interactive changeset browser.
+
+ If a changeset application fails, you can fix the merge by hand
+ and then resume where you left off by calling hg transplant
+ --continue/-c.
+ '''
+ def getremotechanges(repo, url):
+ sourcerepo = ui.expandpath(url)
+ source = hg.repository(ui, sourcerepo)
+ common, incoming, rheads = repo.findcommonincoming(source, force=True)
+ if not incoming:
+ return (source, None, None)
+
+ bundle = None
+ if not source.local():
+ if source.capable('changegroupsubset'):
+ cg = source.changegroupsubset(incoming, rheads, 'incoming')
+ else:
+ cg = source.changegroup(incoming, 'incoming')
+ bundle = changegroup.writebundle(cg, None, 'HG10UN')
+ source = bundlerepo.bundlerepository(ui, repo.root, bundle)
+
+ return (source, incoming, bundle)
+
+ def incwalk(repo, incoming, branches, match=util.always):
+ if not branches:
+ branches = None
+ for node in repo.changelog.nodesbetween(incoming, branches)[0]:
+ if match(node):
+ yield node
+
+ def transplantwalk(repo, root, branches, match=util.always):
+ if not branches:
+ branches = repo.heads()
+ ancestors = []
+ for branch in branches:
+ ancestors.append(repo.changelog.ancestor(root, branch))
+ for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
+ if match(node):
+ yield node
+
+ def checkopts(opts, revs):
+ if opts.get('continue'):
+ if filter(lambda opt: opts.get(opt), ('branch', 'all', 'merge')):
+ raise util.Abort(_('--continue is incompatible with '
+ 'branch, all or merge'))
+ return
+ if not (opts.get('source') or revs or
+ opts.get('merge') or opts.get('branch')):
+ raise util.Abort(_('no source URL, branch tag or revision '
+ 'list provided'))
+ if opts.get('all'):
+ if not opts.get('branch'):
+ raise util.Abort(_('--all requires a branch revision'))
+ if revs:
+ raise util.Abort(_('--all is incompatible with a '
+ 'revision list'))
+
+ checkopts(opts, revs)
+
+ if not opts.get('log'):
+ opts['log'] = ui.config('transplant', 'log')
+ if not opts.get('filter'):
+ opts['filter'] = ui.config('transplant', 'filter')
+
+ tp = transplanter(ui, repo)
+
+ p1, p2 = repo.dirstate.parents()
+ if len(repo) > 0 and p1 == revlog.nullid:
+ raise util.Abort(_('no revision checked out'))
+ if not opts.get('continue'):
+ if p2 != revlog.nullid:
+ raise util.Abort(_('outstanding uncommitted merges'))
+ m, a, r, d = repo.status()[:4]
+ if m or a or r or d:
+ raise util.Abort(_('outstanding local changes'))
+
+ bundle = None
+ source = opts.get('source')
+ if source:
+ (source, incoming, bundle) = getremotechanges(repo, source)
+ else:
+ source = repo
+
+ try:
+ if opts.get('continue'):
+ tp.resume(repo, source, opts)
+ return
+
+ tf = tp.transplantfilter(repo, source, p1)
+ if opts.get('prune'):
+ prune = [source.lookup(r)
+ for r in cmdutil.revrange(source, opts.get('prune'))]
+ matchfn = lambda x: tf(x) and x not in prune
+ else:
+ matchfn = tf
+ branches = map(source.lookup, opts.get('branch', ()))
+ merges = map(source.lookup, opts.get('merge', ()))
+ revmap = {}
+ if revs:
+ for r in cmdutil.revrange(source, revs):
+ revmap[int(r)] = source.lookup(r)
+ elif opts.get('all') or not merges:
+ if source != repo:
+ alltransplants = incwalk(source, incoming, branches,
+ match=matchfn)
+ else:
+ alltransplants = transplantwalk(source, p1, branches,
+ match=matchfn)
+ if opts.get('all'):
+ revs = alltransplants
+ else:
+ revs, newmerges = browserevs(ui, source, alltransplants, opts)
+ merges.extend(newmerges)
+ for r in revs:
+ revmap[source.changelog.rev(r)] = r
+ for r in merges:
+ revmap[source.changelog.rev(r)] = r
+
+ tp.apply(repo, source, revmap, merges, opts)
+ finally:
+ if bundle:
+ source.close()
+ os.unlink(bundle)
+
+cmdtable = {
+ "transplant":
+ (transplant,
+ [('s', 'source', '', _('pull patches from REPOSITORY')),
+ ('b', 'branch', [], _('pull patches from branch BRANCH')),
+ ('a', 'all', None, _('pull all changesets up to BRANCH')),
+ ('p', 'prune', [], _('skip over REV')),
+ ('m', 'merge', [], _('merge at REV')),
+ ('', 'log', None, _('append transplant info to log message')),
+ ('c', 'continue', None, _('continue last transplant session '
+ 'after repair')),
+ ('', 'filter', '', _('filter changesets through FILTER'))],
+ _('hg transplant [-s REPOSITORY] [-b BRANCH [-a]] [-p REV] '
+ '[-m REV] [REV]...'))
+}
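+
+# Usage sketch (hypothetical paths and revision numbers):
+#
+#   $ hg transplant --log -s ../other-repo 123 125
+#
+# pulls revisions 123 and 125 from ../other-repo, applying each on top of
+# the working directory with "(transplanted from ...)" appended to the
+# log message.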
diff --git a/sys/src/cmd/hg/hgext/win32mbcs.py b/sys/src/cmd/hg/hgext/win32mbcs.py
new file mode 100644
index 000000000..a707f053e
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/win32mbcs.py
@@ -0,0 +1,147 @@
+# win32mbcs.py -- MBCS filename support for Mercurial
+#
+# Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
+#
+# Version: 0.2
+# Author: Shun-ichi Goto <shunichi.goto@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+
+'''allow the use of MBCS paths with problematic encodings
+
+Some MBCS encodings are not good for some path operations (i.e.
+splitting path, case conversion, etc.) on their encoded bytes. We call
+such an encoding (e.g. shift_jis and big5) a "problematic encoding".
+This extension can be used to fix the issue with those encodings by
+wrapping some functions to convert to Unicode strings before path
+operations.
+
+This extension is useful for:
+
+- Japanese Windows users using shift_jis encoding.
+- Chinese Windows users using big5 encoding.
+- All users who use a repository with one of the problematic encodings
+ on a case-insensitive file system.
+
+This extension is not needed for:
+
+- Any user who uses only ASCII chars in paths.
+- Any user who does not use any of the problematic encodings.
+
+Note that there are some limitations on using this extension:
+
+- You should use a single encoding in one repository.
+- You should set the same encoding for the repository by locale or
+ HGENCODING.
+
+Path encoding conversions are done between Unicode and
+encoding.encoding, which is decided by Mercurial from the current
+locale setting or HGENCODING.
+'''
+
+import os, sys
+from mercurial.i18n import _
+from mercurial import util, encoding
+
+def decode(arg):
+ if isinstance(arg, str):
+ uarg = arg.decode(encoding.encoding)
+ if arg == uarg.encode(encoding.encoding):
+ return uarg
+ raise UnicodeError("Not local encoding")
+ elif isinstance(arg, tuple):
+ return tuple(map(decode, arg))
+ elif isinstance(arg, list):
+ return map(decode, arg)
+ elif isinstance(arg, dict):
+ for k, v in arg.items():
+ arg[k] = decode(v)
+ return arg
+
+def encode(arg):
+ if isinstance(arg, unicode):
+ return arg.encode(encoding.encoding)
+ elif isinstance(arg, tuple):
+ return tuple(map(encode, arg))
+ elif isinstance(arg, list):
+ return map(encode, arg)
+ elif isinstance(arg, dict):
+ for k, v in arg.items():
+ arg[k] = encode(v)
+ return arg
+
+def appendsep(s):
+ # ensure the path ends with os.sep, appending it if necessary.
+ try:
+ us = decode(s)
+ except UnicodeError:
+ us = s
+ if us and us[-1] not in ':/\\':
+ s += os.sep
+ return s
+
+def wrapper(func, args, kwds):
+ # check argument is unicode, then call original
+ for arg in args:
+ if isinstance(arg, unicode):
+ return func(*args, **kwds)
+
+ try:
+ # convert arguments to unicode, call func, then convert back
+ return encode(func(*decode(args), **decode(kwds)))
+ except UnicodeError:
+ raise util.Abort(_("[win32mbcs] filename conversion failed with"
+ " %s encoding\n") % (encoding.encoding))
+
+def wrapperforlistdir(func, args, kwds):
+ # Ensure the 'path' argument ends with os.sep, to avoid
+ # misinterpreting the last 0x5c of an MBCS 2nd byte as a path separator.
+ if args:
+ args = list(args)
+ args[0] = appendsep(args[0])
+ if kwds.has_key('path'):
+ kwds['path'] = appendsep(kwds['path'])
+ return func(*args, **kwds)
+
+def wrapname(name, wrapper):
+ module, name = name.rsplit('.', 1)
+ module = sys.modules[module]
+ func = getattr(module, name)
+ def f(*args, **kwds):
+ return wrapper(func, args, kwds)
+ try:
+ f.__name__ = func.__name__ # fails on python 2.3
+ except Exception:
+ pass
+ setattr(module, name, f)
+
+# List of functions to be wrapped.
+# NOTE: os.path.dirname() and os.path.basename() are safe because
+# they use result of os.path.split()
+funcs = '''os.path.join os.path.split os.path.splitext
+ os.path.splitunc os.path.normpath os.path.normcase os.makedirs
+ mercurial.util.endswithsep mercurial.util.splitpath mercurial.util.checkcase
+ mercurial.util.fspath mercurial.windows.pconvert'''
+
+# codec and alias names of sjis and big5 to be faked.
+problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
+ hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
+ sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
+ shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
+
+def reposetup(ui, repo):
+ # TODO: decide use of config section for this extension
+ if not os.path.supports_unicode_filenames:
+ ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
+ return
+
+ # faking is only done for the relevant environments.
+ if encoding.encoding.lower() in problematic_encodings.split():
+ for f in funcs.split():
+ wrapname(f, wrapper)
+ wrapname("mercurial.osutil.listdir", wrapperforlistdir)
+ ui.debug(_("[win32mbcs] activated with encoding: %s\n")
+ % encoding.encoding)
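+
+# Enabling sketch (hgrc); this only takes effect when the locale encoding
+# is one of the problematic encodings listed above:
+#
+#   [extensions]
+#   hgext.win32mbcs =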
+
diff --git a/sys/src/cmd/hg/hgext/win32text.py b/sys/src/cmd/hg/hgext/win32text.py
new file mode 100644
index 000000000..2c64f1356
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/win32text.py
@@ -0,0 +1,158 @@
+# win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users
+#
+# Copyright 2005, 2007-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''perform automatic newline conversion
+
+To perform automatic newline conversion, use::
+
+ [extensions]
+ hgext.win32text =
+ [encode]
+ ** = cleverencode:
+ # or ** = macencode:
+
+ [decode]
+ ** = cleverdecode:
+ # or ** = macdecode:
+
+If not doing conversion, to make sure you do not commit CRLF/CR by accident::
+
+ [hooks]
+ pretxncommit.crlf = python:hgext.win32text.forbidcrlf
+ # or pretxncommit.cr = python:hgext.win32text.forbidcr
+
+To do the same check on a server to prevent CRLF/CR from being
+pushed or pulled::
+
+ [hooks]
+ pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
+ # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short
+from mercurial import util
+import re
+
+# regexp for single LF without CR preceding.
+re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
+
+newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
+filterstr = {'\r\n': 'clever', '\r': 'mac'}
+
+def checknewline(s, newline, ui=None, repo=None, filename=None):
+ # warn if already has 'newline' in repository.
+ # it might cause unexpected eol conversion.
+ # see issue 302:
+ # http://mercurial.selenic.com/bts/issue302
+ if newline in s and ui and filename and repo:
+ ui.warn(_('WARNING: %s already has %s line endings\n'
+ 'and does not need EOL conversion by the win32text plugin.\n'
+ 'Before your next commit, please reconsider your '
+ 'encode/decode settings in\nMercurial.ini or %s.\n') %
+ (filename, newlinestr[newline], repo.join('hgrc')))
+
+def dumbdecode(s, cmd, **kwargs):
+ checknewline(s, '\r\n', **kwargs)
+ # replace bare LF (no preceding CR) with CRLF
+ return re_single_lf.sub('\\1\r\n', s)
+
+def dumbencode(s, cmd):
+ return s.replace('\r\n', '\n')
+
+def macdumbdecode(s, cmd, **kwargs):
+ checknewline(s, '\r', **kwargs)
+ return s.replace('\n', '\r')
+
+def macdumbencode(s, cmd):
+ return s.replace('\r', '\n')
+
+def cleverdecode(s, cmd, **kwargs):
+ if not util.binary(s):
+ return dumbdecode(s, cmd, **kwargs)
+ return s
+
+def cleverencode(s, cmd):
+ if not util.binary(s):
+ return dumbencode(s, cmd)
+ return s
+
+def macdecode(s, cmd, **kwargs):
+ if not util.binary(s):
+ return macdumbdecode(s, cmd, **kwargs)
+ return s
+
+def macencode(s, cmd):
+ if not util.binary(s):
+ return macdumbencode(s, cmd)
+ return s
+
+_filters = {
+ 'dumbdecode:': dumbdecode,
+ 'dumbencode:': dumbencode,
+ 'cleverdecode:': cleverdecode,
+ 'cleverencode:': cleverencode,
+ 'macdumbdecode:': macdumbdecode,
+ 'macdumbencode:': macdumbencode,
+ 'macdecode:': macdecode,
+ 'macencode:': macencode,
+ }
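+# Behaviour sketch (illustrative, not part of the extension): the clever
+# filters leave binary files alone and only translate text, e.g.
+#
+# cleverencode('a\r\nb\r\n', '') # -> 'a\nb\n'
+# cleverdecode('a\nb\n', '') # -> 'a\r\nb\r\n'
+# cleverencode('\x00bin', '') # unchanged; util.binary() spots the NUL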
+
+def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
+ halt = False
+ seen = set()
+ # we try to walk changesets in reverse order from newest to
+ # oldest, so that if we see a file multiple times, we take the
+ # newest version as canonical. this prevents us from blocking a
+ # changegroup that contains an unacceptable commit followed later
+ # by a commit that fixes the problem.
+ tip = repo['tip']
+ for rev in xrange(len(repo)-1, repo[node].rev()-1, -1):
+ c = repo[rev]
+ for f in c.files():
+ if f in seen or f not in tip or f not in c:
+ continue
+ seen.add(f)
+ data = c[f].data()
+ if not util.binary(data) and newline in data:
+ if not halt:
+ ui.warn(_('Attempt to commit or push text file(s) '
+ 'using %s line endings\n') %
+ newlinestr[newline])
+ ui.warn(_('in %s: %s\n') % (short(c.node()), f))
+ halt = True
+ if halt and hooktype == 'pretxnchangegroup':
+ crlf = newlinestr[newline].lower()
+ filter = filterstr[newline]
+ ui.warn(_('\nTo prevent this mistake in your local repository,\n'
+ 'add to Mercurial.ini or .hg/hgrc:\n'
+ '\n'
+ '[hooks]\n'
+ 'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
+ '\n'
+ 'and also consider adding:\n'
+ '\n'
+ '[extensions]\n'
+ 'hgext.win32text =\n'
+ '[encode]\n'
+ '** = %sencode:\n'
+ '[decode]\n'
+ '** = %sdecode:\n') % (crlf, crlf, filter, filter))
+ return halt
+
+def forbidcrlf(ui, repo, hooktype, node, **kwargs):
+ return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs)
+
+def forbidcr(ui, repo, hooktype, node, **kwargs):
+ return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs)
+
+def reposetup(ui, repo):
+ if not repo.local():
+ return
+ for name, fn in _filters.iteritems():
+ repo.adddatafilter(name, fn)
+
diff --git a/sys/src/cmd/hg/hgext/zeroconf/Zeroconf.py b/sys/src/cmd/hg/hgext/zeroconf/Zeroconf.py
new file mode 100644
index 000000000..33a345923
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/zeroconf/Zeroconf.py
@@ -0,0 +1,1573 @@
+""" Multicast DNS Service Discovery for Python, v0.12
+ Copyright (C) 2003, Paul Scott-Murphy
+
+ This module provides a framework for the use of DNS Service Discovery
+ using IP multicast. It has been tested against the JRendezvous
+ implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
+ and against the mDNSResponder from Mac OS X 10.3.8.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""
+
+"""0.12 update - allow selection of binding interface
+ typo fix - Thanks A. M. Kuchlingi
+ removed all use of word 'Rendezvous' - this is an API change"""
+
+"""0.11 update - correction to comments for addListener method
+ support for new record types seen from OS X
+ - IPv6 address
+ - hostinfo
+ ignore unknown DNS record types
+ fixes to name decoding
+ works alongside other processes using port 5353 (e.g. on Mac OS X)
+ tested against Mac OS X 10.3.2's mDNSResponder
+ corrections to removal of list entries for service browser"""
+
+"""0.10 update - Jonathon Paisley contributed these corrections:
+ always multicast replies, even when query is unicast
+ correct a pointer encoding problem
+ can now write records in any order
+ traceback shown on failure
+ better TXT record parsing
+ server is now separate from name
+ can cancel a service browser
+
+ modified some unit tests to accommodate these changes"""
+
+"""0.09 update - remove all records on service unregistration
+ fix DOS security problem with readName"""
+
+"""0.08 update - changed licensing to LGPL"""
+
+"""0.07 update - faster shutdown on engine
+ pointer encoding of outgoing names
+ ServiceBrowser now works
+ new unit tests"""
+
+"""0.06 update - small improvements with unit tests
+ added defined exception types
+ new style objects
+ fixed hostname/interface problem
+ fixed socket timeout problem
+ fixed addServiceListener() typo bug
+ using select() for socket reads
+ tested on Debian unstable with Python 2.2.2"""
+
+"""0.05 update - ensure case insensitivty on domain names
+ support for unicast DNS queries"""
+
+"""0.04 update - added some unit tests
+ added __ne__ adjuncts where required
+ ensure names end in '.local.'
+ timeout on receiving socket for clean shutdown"""
+
+__author__ = "Paul Scott-Murphy"
+__email__ = "paul at scott dash murphy dot com"
+__version__ = "0.12"
+
+import string
+import time
+import struct
+import socket
+import threading
+import select
+import traceback
+
+__all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
+
+# hook for threads
+
+globals()['_GLOBAL_DONE'] = 0
+
+# Some timing constants
+
+_UNREGISTER_TIME = 125
+_CHECK_TIME = 175
+_REGISTER_TIME = 225
+_LISTENER_TIME = 200
+_BROWSER_TIME = 500
+
+# Some DNS constants
+
+_MDNS_ADDR = '224.0.0.251'
+_MDNS_PORT = 5353
+_DNS_PORT = 53
+_DNS_TTL = 60 * 60 # one hour default TTL
+
+_MAX_MSG_TYPICAL = 1460 # unused
+_MAX_MSG_ABSOLUTE = 8972
+
+_FLAGS_QR_MASK = 0x8000 # query response mask
+_FLAGS_QR_QUERY = 0x0000 # query
+_FLAGS_QR_RESPONSE = 0x8000 # response
+
+_FLAGS_AA = 0x0400 # Authoritative answer
+_FLAGS_TC = 0x0200 # Truncated
+_FLAGS_RD = 0x0100 # Recursion desired
+_FLAGS_RA = 0x0080 # Recursion available
+
+_FLAGS_Z = 0x0040 # Zero
+_FLAGS_AD = 0x0020 # Authentic data
+_FLAGS_CD = 0x0010 # Checking disabled
+
+_CLASS_IN = 1
+_CLASS_CS = 2
+_CLASS_CH = 3
+_CLASS_HS = 4
+_CLASS_NONE = 254
+_CLASS_ANY = 255
+_CLASS_MASK = 0x7FFF
+_CLASS_UNIQUE = 0x8000
+
+_TYPE_A = 1
+_TYPE_NS = 2
+_TYPE_MD = 3
+_TYPE_MF = 4
+_TYPE_CNAME = 5
+_TYPE_SOA = 6
+_TYPE_MB = 7
+_TYPE_MG = 8
+_TYPE_MR = 9
+_TYPE_NULL = 10
+_TYPE_WKS = 11
+_TYPE_PTR = 12
+_TYPE_HINFO = 13
+_TYPE_MINFO = 14
+_TYPE_MX = 15
+_TYPE_TXT = 16
+_TYPE_AAAA = 28
+_TYPE_SRV = 33
+_TYPE_ANY = 255
+
+# Mapping constants to names
+
+_CLASSES = { _CLASS_IN : "in",
+ _CLASS_CS : "cs",
+ _CLASS_CH : "ch",
+ _CLASS_HS : "hs",
+ _CLASS_NONE : "none",
+ _CLASS_ANY : "any" }
+
+_TYPES = { _TYPE_A : "a",
+ _TYPE_NS : "ns",
+ _TYPE_MD : "md",
+ _TYPE_MF : "mf",
+ _TYPE_CNAME : "cname",
+ _TYPE_SOA : "soa",
+ _TYPE_MB : "mb",
+ _TYPE_MG : "mg",
+ _TYPE_MR : "mr",
+ _TYPE_NULL : "null",
+ _TYPE_WKS : "wks",
+ _TYPE_PTR : "ptr",
+ _TYPE_HINFO : "hinfo",
+ _TYPE_MINFO : "minfo",
+ _TYPE_MX : "mx",
+ _TYPE_TXT : "txt",
+ _TYPE_AAAA : "quada",
+ _TYPE_SRV : "srv",
+ _TYPE_ANY : "any" }
+
+# utility functions
+
+def currentTimeMillis():
+ """Current system time in milliseconds"""
+ return time.time() * 1000
+
+# Exceptions
+
+class NonLocalNameException(Exception):
+ pass
+
+class NonUniqueNameException(Exception):
+ pass
+
+class NamePartTooLongException(Exception):
+ pass
+
+class AbstractMethodException(Exception):
+ pass
+
+class BadTypeInNameException(Exception):
+ pass
+
+# implementation classes
+
+class DNSEntry(object):
+ """A DNS entry"""
+
+ def __init__(self, name, type, clazz):
+ self.key = string.lower(name)
+ self.name = name
+ self.type = type
+ self.clazz = clazz & _CLASS_MASK
+ self.unique = (clazz & _CLASS_UNIQUE) != 0
+
+ def __eq__(self, other):
+ """Equality test on name, type, and class"""
+ if isinstance(other, DNSEntry):
+ return self.name == other.name and self.type == other.type and self.clazz == other.clazz
+ return 0
+
+ def __ne__(self, other):
+ """Non-equality test"""
+ return not self.__eq__(other)
+
+ def getClazz(self, clazz):
+ """Class accessor"""
+ try:
+ return _CLASSES[clazz]
+ except:
+ return "?(%s)" % (clazz)
+
+ def getType(self, type):
+ """Type accessor"""
+ try:
+ return _TYPES[type]
+ except:
+ return "?(%s)" % (type)
+
+ def toString(self, hdr, other):
+ """String representation with additional information"""
+ result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
+ if self.unique:
+ result += "-unique,"
+ else:
+ result += ","
+ result += self.name
+ if other is not None:
+ result += ",%s]" % (other)
+ else:
+ result += "]"
+ return result
+
+class DNSQuestion(DNSEntry):
+ """A DNS question entry"""
+
+ def __init__(self, name, type, clazz):
+ if not name.endswith(".local."):
+ raise NonLocalNameException(name)
+ DNSEntry.__init__(self, name, type, clazz)
+
+ def answeredBy(self, rec):
+ """Returns true if the question is answered by the record"""
+ return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
+
+ def __repr__(self):
+ """String representation"""
+ return DNSEntry.toString(self, "question", None)
+
+
+class DNSRecord(DNSEntry):
+ """A DNS record - like a DNS entry, but has a TTL"""
+
+ def __init__(self, name, type, clazz, ttl):
+ DNSEntry.__init__(self, name, type, clazz)
+ self.ttl = ttl
+ self.created = currentTimeMillis()
+
+ def __eq__(self, other):
+ """Tests equality as per DNSRecord"""
+ if isinstance(other, DNSRecord):
+ return DNSEntry.__eq__(self, other)
+ return 0
+
+ def suppressedBy(self, msg):
+ """Returns true if any answer in a message can suffice for the
+ information held in this record."""
+ for record in msg.answers:
+ if self.suppressedByAnswer(record):
+ return 1
+ return 0
+
+ def suppressedByAnswer(self, other):
+ """Returns true if another record has same name, type and class,
+ and if its TTL is at least half of this record's."""
+ if self == other and other.ttl > (self.ttl / 2):
+ return 1
+ return 0
+
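+ # Note (added): ttl is stored in seconds while created is in
+ # milliseconds, so percent * ttl * 10 == ttl * 1000 * percent / 100;
+ # e.g. ttl=60, percent=50 gives created + 30,000 ms, the half-life.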
+ def getExpirationTime(self, percent):
+ """Returns the time at which this record will have expired
+ by a certain percentage."""
+ return self.created + (percent * self.ttl * 10)
+
+ def getRemainingTTL(self, now):
+ """Returns the remaining TTL in seconds."""
+ return max(0, (self.getExpirationTime(100) - now) / 1000)
+
+ def isExpired(self, now):
+ """Returns true if this record has expired."""
+ return self.getExpirationTime(100) <= now
+
+ def isStale(self, now):
+ """Returns true if this record is at least half way expired."""
+ return self.getExpirationTime(50) <= now
+
+ def resetTTL(self, other):
+ """Sets this record's TTL and created time to that of
+ another record."""
+ self.created = other.created
+ self.ttl = other.ttl
+
+ def write(self, out):
+ """Abstract method"""
+ raise AbstractMethodException
+
+ def toString(self, other):
+ """String representation with addtional information"""
+ arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)
+ return DNSEntry.toString(self, "record", arg)
+
+class DNSAddress(DNSRecord):
+ """A DNS address record"""
+
+ def __init__(self, name, type, clazz, ttl, address):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.address = address
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeString(self.address, len(self.address))
+
+ def __eq__(self, other):
+ """Tests equality on address"""
+ if isinstance(other, DNSAddress):
+ return self.address == other.address
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ try:
+ return socket.inet_ntoa(self.address)
+ except:
+ return self.address
+
+class DNSHinfo(DNSRecord):
+ """A DNS host information record"""
+
+ def __init__(self, name, type, clazz, ttl, cpu, os):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.cpu = cpu
+ self.os = os
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeString(self.cpu, len(self.cpu))
+ out.writeString(self.os, len(self.os))
+
+ def __eq__(self, other):
+ """Tests equality on cpu and os"""
+ if isinstance(other, DNSHinfo):
+ return self.cpu == other.cpu and self.os == other.os
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ return self.cpu + " " + self.os
+
+class DNSPointer(DNSRecord):
+ """A DNS pointer record"""
+
+ def __init__(self, name, type, clazz, ttl, alias):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.alias = alias
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeName(self.alias)
+
+ def __eq__(self, other):
+ """Tests equality on alias"""
+ if isinstance(other, DNSPointer):
+ return self.alias == other.alias
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ return self.toString(self.alias)
+
+class DNSText(DNSRecord):
+ """A DNS text record"""
+
+ def __init__(self, name, type, clazz, ttl, text):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.text = text
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeString(self.text, len(self.text))
+
+ def __eq__(self, other):
+ """Tests equality on text"""
+ if isinstance(other, DNSText):
+ return self.text == other.text
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ if len(self.text) > 10:
+ return self.toString(self.text[:7] + "...")
+ else:
+ return self.toString(self.text)
+
+class DNSService(DNSRecord):
+ """A DNS service record"""
+
+ def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
+ DNSRecord.__init__(self, name, type, clazz, ttl)
+ self.priority = priority
+ self.weight = weight
+ self.port = port
+ self.server = server
+
+ def write(self, out):
+ """Used in constructing an outgoing packet"""
+ out.writeShort(self.priority)
+ out.writeShort(self.weight)
+ out.writeShort(self.port)
+ out.writeName(self.server)
+
+ def __eq__(self, other):
+ """Tests equality on priority, weight, port and server"""
+ if isinstance(other, DNSService):
+ return self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server
+ return 0
+
+ def __repr__(self):
+ """String representation"""
+ return self.toString("%s:%s" % (self.server, self.port))
+
+class DNSIncoming(object):
+ """Object representation of an incoming DNS packet"""
+
+ def __init__(self, data):
+ """Constructor from string holding bytes of packet"""
+ self.offset = 0
+ self.data = data
+ self.questions = []
+ self.answers = []
+ self.numQuestions = 0
+ self.numAnswers = 0
+ self.numAuthorities = 0
+ self.numAdditionals = 0
+
+ self.readHeader()
+ self.readQuestions()
+ self.readOthers()
+
+ def readHeader(self):
+ """Reads header portion of packet"""
+ format = '!HHHHHH'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+
+ self.id = info[0]
+ self.flags = info[1]
+ self.numQuestions = info[2]
+ self.numAnswers = info[3]
+ self.numAuthorities = info[4]
+ self.numAdditionals = info[5]
+
+ def readQuestions(self):
+ """Reads questions section of packet"""
+ format = '!HH'
+ length = struct.calcsize(format)
+ for i in range(0, self.numQuestions):
+ name = self.readName()
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+
+ try:
+ question = DNSQuestion(name, info[0], info[1])
+ self.questions.append(question)
+ except NonLocalNameException:
+ pass
+
+ def readInt(self):
+ """Reads an integer from the packet"""
+ format = '!I'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+ return info[0]
+
+ def readCharacterString(self):
+ """Reads a character string from the packet"""
+ length = ord(self.data[self.offset])
+ self.offset += 1
+ return self.readString(length)
+
+ def readString(self, len):
+ """Reads a string of a given length from the packet"""
+ format = '!' + str(len) + 's'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+ return info[0]
+
+ def readUnsignedShort(self):
+ """Reads an unsigned short from the packet"""
+ format = '!H'
+ length = struct.calcsize(format)
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+ return info[0]
+
+ def readOthers(self):
+ """Reads the answers, authorities and additionals section of the packet"""
+ format = '!HHiH'
+ length = struct.calcsize(format)
+ n = self.numAnswers + self.numAuthorities + self.numAdditionals
+ for i in range(0, n):
+ domain = self.readName()
+ info = struct.unpack(format, self.data[self.offset:self.offset+length])
+ self.offset += length
+
+ rec = None
+ if info[0] == _TYPE_A:
+ rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(4))
+ elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
+ rec = DNSPointer(domain, info[0], info[1], info[2], self.readName())
+ elif info[0] == _TYPE_TXT:
+ rec = DNSText(domain, info[0], info[1], info[2], self.readString(info[3]))
+ elif info[0] == _TYPE_SRV:
+ rec = DNSService(domain, info[0], info[1], info[2], self.readUnsignedShort(), self.readUnsignedShort(), self.readUnsignedShort(), self.readName())
+ elif info[0] == _TYPE_HINFO:
+ rec = DNSHinfo(domain, info[0], info[1], info[2], self.readCharacterString(), self.readCharacterString())
+ elif info[0] == _TYPE_AAAA:
+ rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(16))
+ else:
+ # Ignore record types we don't know about. Skipping the
+ # record without consuming its data may leave the rest of
+ # the packet unparseable, so the debugging aids below are
+ # kept commented out; newly encountered types should be
+ # given proper parsers.
+ #
+ #print "UNKNOWN TYPE = " + str(info[0])
+ #raise BadTypeInNameException
+ pass
+
+ if rec is not None:
+ self.answers.append(rec)
+
+ def isQuery(self):
+ """Returns true if this is a query"""
+ return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
+
+ def isResponse(self):
+ """Returns true if this is a response"""
+ return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
+
+ def readUTF(self, offset, len):
+ """Reads a UTF-8 string of a given length from the packet"""
+ result = self.data[offset:offset+len].decode('utf-8')
+ return result
+
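+ # Wire-format note (added for reference): a DNS name is a run of
+ # length-prefixed labels ending in a zero byte, so the bytes
+ # '\x03foo\x05local\x00' decode to 'foo.local.'; bytes with the top
+ # two bits set (0xC0) are compression pointers, handled below.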
+ def readName(self):
+ """Reads a domain name from the packet"""
+ result = ''
+ off = self.offset
+ next = -1
+ first = off
+
+ while 1:
+ length = ord(self.data[off])
+ off += 1
+ if length == 0:
+ break
+ t = length & 0xC0
+ if t == 0x00:
+ result = ''.join((result, self.readUTF(off, length) + '.'))
+ off += length
+ elif t == 0xC0:
+ if next < 0:
+ next = off + 1
+ off = ((length & 0x3F) << 8) | ord(self.data[off])
+ if off >= first:
+ raise Exception("Bad domain name (circular) at " + str(off))
+ first = off
+ else:
+ raise Exception("Bad domain name at " + str(off))
+
+ if next >= 0:
+ self.offset = next
+ else:
+ self.offset = off
+
+ return result
+
+
+class DNSOutgoing(object):
+ """Object representation of an outgoing packet"""
+
+ def __init__(self, flags, multicast = 1):
+ self.finished = 0
+ self.id = 0
+ self.multicast = multicast
+ self.flags = flags
+ self.names = {}
+ self.data = []
+ self.size = 12
+
+ self.questions = []
+ self.answers = []
+ self.authorities = []
+ self.additionals = []
+
+ def addQuestion(self, record):
+ """Adds a question"""
+ self.questions.append(record)
+
+ def addAnswer(self, inp, record):
+ """Adds an answer"""
+ if not record.suppressedBy(inp):
+ self.addAnswerAtTime(record, 0)
+
+ def addAnswerAtTime(self, record, now):
+ """Adds an answer if if does not expire by a certain time"""
+ if record is not None:
+ if now == 0 or not record.isExpired(now):
+ self.answers.append((record, now))
+
+ def addAuthorativeAnswer(self, record):
+ """Adds an authoritative answer"""
+ self.authorities.append(record)
+
+ def addAdditionalAnswer(self, record):
+ """Adds an additional answer"""
+ self.additionals.append(record)
+
+ def writeByte(self, value):
+ """Writes a single byte to the packet"""
+ format = '!c'
+ self.data.append(struct.pack(format, chr(value)))
+ self.size += 1
+
+ def insertShort(self, index, value):
+ """Inserts an unsigned short in a certain position in the packet"""
+ format = '!H'
+ self.data.insert(index, struct.pack(format, value))
+ self.size += 2
+
+ def writeShort(self, value):
+ """Writes an unsigned short to the packet"""
+ format = '!H'
+ self.data.append(struct.pack(format, value))
+ self.size += 2
+
+ def writeInt(self, value):
+ """Writes an unsigned integer to the packet"""
+ format = '!I'
+ self.data.append(struct.pack(format, int(value)))
+ self.size += 4
+
+ def writeString(self, value, length):
+ """Writes a string to the packet"""
+ format = '!' + str(length) + 's'
+ self.data.append(struct.pack(format, value))
+ self.size += length
+
+ def writeUTF(self, s):
+ """Writes a UTF-8 string of a given length to the packet"""
+ utfstr = s.encode('utf-8')
+ length = len(utfstr)
+ if length > 64:
+ raise NamePartTooLongException
+ self.writeByte(length)
+ self.writeString(utfstr, length)
+
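+ # writeName implements DNS name compression (RFC 1035, section
+ # 4.1.4): the first occurrence of a name is written out as labels and
+ # its offset remembered; later occurrences become a two-byte pointer
+ # whose top two bits are 11, e.g. offset 12 is written as 0xC0 0x0C.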
+ def writeName(self, name):
+ """Writes a domain name to the packet"""
+
+ try:
+ # Find existing instance of this name in packet
+ #
+ index = self.names[name]
+ except KeyError:
+ # No record of this name already, so write it
+ # out as normal, recording the location of the name
+ # for future pointers to it.
+ #
+ self.names[name] = self.size
+ parts = name.split('.')
+ if parts[-1] == '':
+ parts = parts[:-1]
+ for part in parts:
+ self.writeUTF(part)
+ self.writeByte(0)
+ return
+
+ # An index was found, so write a pointer to it
+ #
+ self.writeByte((index >> 8) | 0xC0)
+ self.writeByte(index)
+
+ def writeQuestion(self, question):
+ """Writes a question to the packet"""
+ self.writeName(question.name)
+ self.writeShort(question.type)
+ self.writeShort(question.clazz)
+
+ def writeRecord(self, record, now):
+ """Writes a record (answer, authoritative answer, additional) to
+ the packet"""
+ self.writeName(record.name)
+ self.writeShort(record.type)
+ if record.unique and self.multicast:
+ self.writeShort(record.clazz | _CLASS_UNIQUE)
+ else:
+ self.writeShort(record.clazz)
+ if now == 0:
+ self.writeInt(record.ttl)
+ else:
+ self.writeInt(record.getRemainingTTL(now))
+ index = len(self.data)
+ # Adjust size for the short we will write before this record
+ #
+ self.size += 2
+ record.write(self)
+ self.size -= 2
+
+ length = len(''.join(self.data[index:]))
+ self.insertShort(index, length) # Here is the short we adjusted for
+
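+ # Note (added): packet() prepends the header with repeated
+ # insertShort(0, ...) calls, so the fields are inserted in reverse;
+ # the finished packet starts with the standard 12-byte DNS header:
+ # id, flags, qdcount, ancount, nscount, arcount.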
+ def packet(self):
+ """Returns a string containing the packet's bytes
+
+ No further parts should be added to the packet once this
+ is done."""
+ if not self.finished:
+ self.finished = 1
+ for question in self.questions:
+ self.writeQuestion(question)
+ for answer, time in self.answers:
+ self.writeRecord(answer, time)
+ for authority in self.authorities:
+ self.writeRecord(authority, 0)
+ for additional in self.additionals:
+ self.writeRecord(additional, 0)
+
+ self.insertShort(0, len(self.additionals))
+ self.insertShort(0, len(self.authorities))
+ self.insertShort(0, len(self.answers))
+ self.insertShort(0, len(self.questions))
+ self.insertShort(0, self.flags)
+ if self.multicast:
+ self.insertShort(0, 0)
+ else:
+ self.insertShort(0, self.id)
+ return ''.join(self.data)
+
+
+class DNSCache(object):
+ """A cache of DNS entries"""
+
+ def __init__(self):
+ self.cache = {}
+
+ def add(self, entry):
+ """Adds an entry"""
+ try:
+ list = self.cache[entry.key]
+ except:
+ list = self.cache[entry.key] = []
+ list.append(entry)
+
+ def remove(self, entry):
+ """Removes an entry"""
+ try:
+ list = self.cache[entry.key]
+ list.remove(entry)
+ except:
+ pass
+
+ def get(self, entry):
+ """Gets an entry by key. Will return None if there is no
+ matching entry."""
+ try:
+ list = self.cache[entry.key]
+ return list[list.index(entry)]
+ except:
+ return None
+
+ def getByDetails(self, name, type, clazz):
+ """Gets an entry by details. Will return None if there is
+ no matching entry."""
+ entry = DNSEntry(name, type, clazz)
+ return self.get(entry)
+
+ def entriesWithName(self, name):
+ """Returns a list of entries whose key matches the name."""
+ try:
+ return self.cache[name]
+ except:
+ return []
+
+ def entries(self):
+ """Returns a list of all entries"""
+ def add(x, y): return x+y
+ try:
+ return reduce(add, self.cache.values())
+ except:
+ return []
+
+
+class Engine(threading.Thread):
+ """An engine wraps read access to sockets, allowing objects that
+ need to receive data from sockets to be called back when the
+ sockets are ready.
+
+ A reader needs a handle_read() method, which is called when the socket
+ it is interested in is ready for reading.
+
+ Writers are not implemented here, because we only send short
+ packets.
+ """
+
+ def __init__(self, zeroconf):
+ threading.Thread.__init__(self)
+ self.zeroconf = zeroconf
+ self.readers = {} # maps socket to reader
+ self.timeout = 5
+ self.condition = threading.Condition()
+ self.start()
+
+ def run(self):
+ while not globals()['_GLOBAL_DONE']:
+ rs = self.getReaders()
+ if len(rs) == 0:
+ # No sockets to manage, but we wait for the timeout
+ # or addition of a socket
+ #
+ self.condition.acquire()
+ self.condition.wait(self.timeout)
+ self.condition.release()
+ else:
+ try:
+ rr, wr, er = select.select(rs, [], [], self.timeout)
+ for sock in rr:
+ try:
+ self.readers[sock].handle_read()
+ except:
+ traceback.print_exc()
+ except:
+ pass
+
+ def getReaders(self):
+ self.condition.acquire()
+ result = self.readers.keys()
+ self.condition.release()
+ return result
+
+ def addReader(self, reader, socket):
+ self.condition.acquire()
+ self.readers[socket] = reader
+ self.condition.notify()
+ self.condition.release()
+
+ def delReader(self, socket):
+ self.condition.acquire()
+ del(self.readers[socket])
+ self.condition.notify()
+ self.condition.release()
+
+ def notify(self):
+ self.condition.acquire()
+ self.condition.notify()
+ self.condition.release()
+
+class Listener(object):
+ """A Listener is used by this module to listen on the multicast
+ group to which DNS messages are sent, allowing the implementation
+ to cache information as it arrives.
+
+ It requires registration with an Engine object in order to have
+ its handle_read() method called when a socket is available for reading."""
+
+ def __init__(self, zeroconf):
+ self.zeroconf = zeroconf
+ self.zeroconf.engine.addReader(self, self.zeroconf.socket)
+
+ def handle_read(self):
+ data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
+ self.data = data
+ msg = DNSIncoming(data)
+ if msg.isQuery():
+ # Always multicast responses
+ #
+ if port == _MDNS_PORT:
+ self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
+ # If it's not a multicast query, reply via unicast
+ # and multicast
+ #
+ elif port == _DNS_PORT:
+ self.zeroconf.handleQuery(msg, addr, port)
+ self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
+ else:
+ self.zeroconf.handleResponse(msg)
+
+
+class Reaper(threading.Thread):
+ """A Reaper is used by this module to remove cache entries that
+ have expired."""
+
+ def __init__(self, zeroconf):
+ threading.Thread.__init__(self)
+ self.zeroconf = zeroconf
+ self.start()
+
+ def run(self):
+ while 1:
+ self.zeroconf.wait(10 * 1000)
+ if globals()['_GLOBAL_DONE']:
+ return
+ now = currentTimeMillis()
+ for record in self.zeroconf.cache.entries():
+ if record.isExpired(now):
+ self.zeroconf.updateRecord(now, record)
+ self.zeroconf.cache.remove(record)
+
+
+class ServiceBrowser(threading.Thread):
+ """Used to browse for a service of a specific type.
+
+ The listener object will have its addService() and
+ removeService() methods called when this browser
+ discovers changes in the services' availability."""
+
+ def __init__(self, zeroconf, type, listener):
+ """Creates a browser for a specific type"""
+ threading.Thread.__init__(self)
+ self.zeroconf = zeroconf
+ self.type = type
+ self.listener = listener
+ self.services = {}
+ self.nextTime = currentTimeMillis()
+ self.delay = _BROWSER_TIME
+ self.list = []
+
+ self.done = 0
+
+ self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
+ self.start()
+
+ def updateRecord(self, zeroconf, now, record):
+ """Callback invoked by Zeroconf when new information arrives.
+
+ Updates information required by browser in the Zeroconf cache."""
+ if record.type == _TYPE_PTR and record.name == self.type:
+ expired = record.isExpired(now)
+ try:
+ oldrecord = self.services[record.alias.lower()]
+ if not expired:
+ oldrecord.resetTTL(record)
+ else:
+ del(self.services[record.alias.lower()])
+ callback = lambda x: self.listener.removeService(x, self.type, record.alias)
+ self.list.append(callback)
+ return
+ except:
+ if not expired:
+ self.services[record.alias.lower()] = record
+ callback = lambda x: self.listener.addService(x, self.type, record.alias)
+ self.list.append(callback)
+
+ expires = record.getExpirationTime(75)
+ if expires < self.nextTime:
+ self.nextTime = expires
+
+ def cancel(self):
+ self.done = 1
+ self.zeroconf.notifyAll()
+
+ def run(self):
+ while 1:
+ event = None
+ now = currentTimeMillis()
+ if len(self.list) == 0 and self.nextTime > now:
+ self.zeroconf.wait(self.nextTime - now)
+ if globals()['_GLOBAL_DONE'] or self.done:
+ return
+ now = currentTimeMillis()
+
+ if self.nextTime <= now:
+ out = DNSOutgoing(_FLAGS_QR_QUERY)
+ out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
+ for record in self.services.values():
+ if not record.isExpired(now):
+ out.addAnswerAtTime(record, now)
+ self.zeroconf.send(out)
+ self.nextTime = now + self.delay
+ self.delay = min(20 * 1000, self.delay * 2)
+
+ if len(self.list) > 0:
+ event = self.list.pop(0)
+
+ if event is not None:
+ event(self.zeroconf)
+
+
+class ServiceInfo(object):
+ """Service information"""
+
+ def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
+ """Create a service description.
+
+ type: fully qualified service type name
+ name: fully qualified service name
+ address: IP address as unsigned short, network byte order
+ port: port that the service runs on
+ weight: weight of the service
+ priority: priority of the service
+ properties: dictionary of properties (or a string holding the bytes for the text field)
+ server: fully qualified name for service host (defaults to name)"""
+
+ if not name.endswith(type):
+ raise BadTypeInNameException
+ self.type = type
+ self.name = name
+ self.address = address
+ self.port = port
+ self.weight = weight
+ self.priority = priority
+ if server:
+ self.server = server
+ else:
+ self.server = name
+ self.setProperties(properties)
+
+ def setProperties(self, properties):
+ """Sets properties and text of this info from a dictionary"""
+ if isinstance(properties, dict):
+ self.properties = properties
+ list = []
+ result = ''
+ for key in properties:
+ value = properties[key]
+ if value is None:
+ suffix = ''.encode('utf-8')
+ elif isinstance(value, str):
+ suffix = value.encode('utf-8')
+ elif isinstance(value, int):
+ if value:
+ suffix = 'true'
+ else:
+ suffix = 'false'
+ else:
+ suffix = ''.encode('utf-8')
+ list.append('='.join((key, suffix)))
+ for item in list:
+ result = ''.join((result, struct.pack('!c', chr(len(item))), item))
+ self.text = result
+ else:
+ self.text = properties
+
+ def setText(self, text):
+ """Sets properties and text given a text field"""
+ self.text = text
+ try:
+ result = {}
+ end = len(text)
+ index = 0
+ strs = []
+ while index < end:
+ length = ord(text[index])
+ index += 1
+ strs.append(text[index:index+length])
+ index += length
+
+ for s in strs:
+ eindex = s.find('=')
+ if eindex == -1:
+ # No equals sign at all
+ key = s
+ value = 0
+ else:
+ key = s[:eindex]
+ value = s[eindex+1:]
+ if value == 'true':
+ value = 1
+ elif value == 'false' or not value:
+ value = 0
+
+ # Only update non-existent properties
+ if key and result.get(key) is None:
+ result[key] = value
+
+ self.properties = result
+ except:
+ traceback.print_exc()
+ self.properties = None
+
+ def getType(self):
+ """Type accessor"""
+ return self.type
+
+ def getName(self):
+ """Name accessor"""
+ if self.type is not None and self.name.endswith("." + self.type):
+ return self.name[:len(self.name) - len(self.type) - 1]
+ return self.name
+
+ def getAddress(self):
+ """Address accessor"""
+ return self.address
+
+ def getPort(self):
+ """Port accessor"""
+ return self.port
+
+ def getPriority(self):
+ """Pirority accessor"""
+ return self.priority
+
+ def getWeight(self):
+ """Weight accessor"""
+ return self.weight
+
+ def getProperties(self):
+ """Properties accessor"""
+ return self.properties
+
+ def getText(self):
+ """Text accessor"""
+ return self.text
+
+ def getServer(self):
+ """Server accessor"""
+ return self.server
+
+ def updateRecord(self, zeroconf, now, record):
+ """Updates service information from a DNS record"""
+ if record is not None and not record.isExpired(now):
+ if record.type == _TYPE_A:
+ #if record.name == self.name:
+ if record.name == self.server:
+ self.address = record.address
+ elif record.type == _TYPE_SRV:
+ if record.name == self.name:
+ self.server = record.server
+ self.port = record.port
+ self.weight = record.weight
+ self.priority = record.priority
+ #self.address = None
+ self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
+ elif record.type == _TYPE_TXT:
+ if record.name == self.name:
+ self.setText(record.text)
+
+ def request(self, zeroconf, timeout):
+ """Returns true if the service could be discovered on the
+ network, and updates this object with details discovered.
+ """
+ now = currentTimeMillis()
+ delay = _LISTENER_TIME
+ next = now + delay
+ last = now + timeout
+ result = 0
+ try:
+ zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
+ while self.server is None or self.address is None or self.text is None:
+ if last <= now:
+ return 0
+ if next <= now:
+ out = DNSOutgoing(_FLAGS_QR_QUERY)
+ out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
+ out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
+ out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
+ out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
+ if self.server is not None:
+ out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
+ out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
+ zeroconf.send(out)
+ next = now + delay
+ delay = delay * 2
+
+ zeroconf.wait(min(next, last) - now)
+ now = currentTimeMillis()
+ result = 1
+ finally:
+ zeroconf.removeListener(self)
+
+ return result
+
+ def __eq__(self, other):
+ """Tests equality of service name"""
+ if isinstance(other, ServiceInfo):
+ return other.name == self.name
+ return 0
+
+ def __ne__(self, other):
+ """Non-equality test"""
+ return not self.__eq__(other)
+
+ def __repr__(self):
+ """String representation"""
+ result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
+ if self.text is None:
+ result += "None"
+ else:
+ if len(self.text) < 20:
+ result += self.text
+ else:
+ result += self.text[:17] + "..."
+ result += "]"
+ return result
+
+
+class Zeroconf(object):
+ """Implementation of Zeroconf Multicast DNS Service Discovery
+
+ Supports registration, unregistration, queries and browsing.
+ """
+ def __init__(self, bindaddress=None):
+ """Creates an instance of the Zeroconf class, establishing
+ multicast communications, listening and reaping threads."""
+ globals()['_GLOBAL_DONE'] = 0
+ if bindaddress is None:
+ self.intf = socket.gethostbyname(socket.gethostname())
+ else:
+ self.intf = bindaddress
+ self.group = ('', _MDNS_PORT)
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ try:
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except:
+ # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
+ # multicast UDP sockets (p 731, "TCP/IP Illustrated,
+ # Volume 2"), but some BSD-derived systems require
+ # SO_REUSEPORT to be specified explicitly. Also, not all
+ # versions of Python have SO_REUSEPORT available. So
+ # if you're on a BSD-based system, and haven't upgraded
+ # to Python 2.3 yet, you may find this library doesn't
+ # work as expected.
+ #
+ pass
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
+ try:
+ self.socket.bind(self.group)
+ except:
+ # Some versions of linux raise an exception even though
+ # the SO_REUSE* options have been set, so ignore it
+ #
+ pass
+ #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
+
+ self.listeners = []
+ self.browsers = []
+ self.services = {}
+ self.servicetypes = {}
+
+ self.cache = DNSCache()
+
+ self.condition = threading.Condition()
+
+ self.engine = Engine(self)
+ self.listener = Listener(self)
+ self.reaper = Reaper(self)
+
+ def isLoopback(self):
+ return self.intf.startswith("127.0.0.1")
+
+ def isLinklocal(self):
+ return self.intf.startswith("169.254.")
+
+ def wait(self, timeout):
+ """Calling thread waits for a given number of milliseconds or
+ until notified."""
+ self.condition.acquire()
+ self.condition.wait(timeout/1000)
+ self.condition.release()
+
+ def notifyAll(self):
+ """Notifies all waiting threads"""
+ self.condition.acquire()
+ self.condition.notifyAll()
+ self.condition.release()
+
+ def getServiceInfo(self, type, name, timeout=3000):
+ """Returns network's service information for a particular
+ name and type, or None if no service matches by the timeout,
+ which defaults to 3 seconds."""
+ info = ServiceInfo(type, name)
+ if info.request(self, timeout):
+ return info
+ return None
+
+ def addServiceListener(self, type, listener):
+ """Adds a listener for a particular service type. This object
+ will then have its updateRecord method called when information
+ arrives for that type."""
+ self.removeServiceListener(listener)
+ self.browsers.append(ServiceBrowser(self, type, listener))
+
+ def removeServiceListener(self, listener):
+ """Removes a listener from the set that is currently listening."""
+ for browser in self.browsers:
+ if browser.listener == listener:
+ browser.cancel()
+ del(browser)
+
+ def registerService(self, info, ttl=_DNS_TTL):
+ """Registers service information to the network with a default TTL
+ of 60 seconds. Zeroconf will then respond to requests for
+ information for that service. The name of the service may be
+ changed if needed to make it unique on the network."""
+ self.checkService(info)
+ self.services[info.name.lower()] = info
+ if info.type in self.servicetypes:
+ self.servicetypes[info.type]+=1
+ else:
+ self.servicetypes[info.type]=1
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
+ out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
+ out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
+ if info.address:
+ out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, ttl, info.address), 0)
+ self.send(out)
+ i += 1
+ nextTime += _REGISTER_TIME
+
+ def unregisterService(self, info):
+ """Unregister a service."""
+ try:
+ del(self.services[info.name.lower()])
+ if self.servicetypes[info.type]>1:
+ self.servicetypes[info.type]-=1
+ else:
+ del self.servicetypes[info.type]
+ except:
+ pass
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
+ out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0)
+ out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
+ if info.address:
+ out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
+ self.send(out)
+ i += 1
+ nextTime += _UNREGISTER_TIME
+
+ def unregisterAllServices(self):
+ """Unregister all registered services."""
+ if len(self.services) > 0:
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ for info in self.services.values():
+ out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
+ out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
+ out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
+ if info.address:
+ out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
+ self.send(out)
+ i += 1
+ nextTime += _UNREGISTER_TIME
+
+ def checkService(self, info):
+ """Checks the network for a unique service name, modifying the
+ ServiceInfo passed in if it is not unique."""
+ now = currentTimeMillis()
+ nextTime = now
+ i = 0
+ while i < 3:
+ for record in self.cache.entriesWithName(info.type):
+ if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
+ if (info.name.find('.') < 0):
+ info.name = info.name + ".[" + info.address + ":" + info.port + "]." + info.type
+ self.checkService(info)
+ return
+ raise NonUniqueNameException
+ if now < nextTime:
+ self.wait(nextTime - now)
+ now = currentTimeMillis()
+ continue
+ out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
+ self.debug = out
+ out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
+ out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
+ self.send(out)
+ i += 1
+ nextTime += _CHECK_TIME
+
+ def addListener(self, listener, question):
+ """Adds a listener for a given question. The listener will have
+ its updateRecord method called when information is available to
+ answer the question."""
+ now = currentTimeMillis()
+ self.listeners.append(listener)
+ if question is not None:
+ for record in self.cache.entriesWithName(question.name):
+ if question.answeredBy(record) and not record.isExpired(now):
+ listener.updateRecord(self, now, record)
+ self.notifyAll()
+
+ def removeListener(self, listener):
+ """Removes a listener."""
+ try:
+ self.listeners.remove(listener)
+ self.notifyAll()
+ except:
+ pass
+
+ def updateRecord(self, now, rec):
+ """Used to notify listeners of new information that has updated
+ a record."""
+ for listener in self.listeners:
+ listener.updateRecord(self, now, rec)
+ self.notifyAll()
+
+ def handleResponse(self, msg):
+ """Deal with incoming response packets. All answers
+ are held in the cache, and listeners are notified."""
+ now = currentTimeMillis()
+ for record in msg.answers:
+ expired = record.isExpired(now)
+ if record in self.cache.entries():
+ if expired:
+ self.cache.remove(record)
+ else:
+ entry = self.cache.get(record)
+ if entry is not None:
+ entry.resetTTL(record)
+ record = entry
+ else:
+ self.cache.add(record)
+
+ self.updateRecord(now, record)
+
+ def handleQuery(self, msg, addr, port):
+ """Deal with incoming query packets. Provides a response if
+ possible."""
+ out = None
+
+ # Support unicast client responses
+ #
+ if port != _MDNS_PORT:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
+ for question in msg.questions:
+ out.addQuestion(question)
+
+ for question in msg.questions:
+ if question.type == _TYPE_PTR:
+ if question.name == "_services._dns-sd._udp.local.":
+ for stype in self.servicetypes.keys():
+ if out is None:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
+ for service in self.services.values():
+ if question.name == service.type:
+ if out is None:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+ out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
+ else:
+ try:
+ if out is None:
+ out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
+
+ # Answer A record queries for any service addresses we know
+ if question.type == _TYPE_A or question.type == _TYPE_ANY:
+ for service in self.services.values():
+ if service.server == question.name.lower():
+ out.addAnswer(msg, DNSAddress(question.name, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
+
+ service = self.services.get(question.name.lower(), None)
+ if not service: continue
+
+ if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
+ out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
+ if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
+ out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
+ if question.type == _TYPE_SRV:
+ out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
+ except:
+ traceback.print_exc()
+
+ if out is not None and out.answers:
+ out.id = msg.id
+ self.send(out, addr, port)
+
+ def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
+ """Sends an outgoing packet."""
+ # This is a quick test to see if we can parse the packets we generate
+ #temp = DNSIncoming(out.packet())
+ try:
+ self.socket.sendto(out.packet(), 0, (addr, port))
+ except:
+ # Ignore this, it may be a temporary loss of network connection
+ pass
+
+ def close(self):
+ """Ends the background threads, and prevent this instance from
+ servicing further queries."""
+ if globals()['_GLOBAL_DONE'] == 0:
+ globals()['_GLOBAL_DONE'] = 1
+ self.notifyAll()
+ self.engine.notify()
+ self.unregisterAllServices()
+ self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
+ self.socket.close()
+
+# Test a few module features, including service registration, service
+# query (for Zoe), and service unregistration.
+
+if __name__ == '__main__':
+ print "Multicast DNS Service Discovery for Python, version", __version__
+ r = Zeroconf()
+ print "1. Testing registration of a service..."
+ desc = {'version':'0.10','a':'test value', 'b':'another value'}
+ info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
+ print " Registering service..."
+ r.registerService(info)
+ print " Registration done."
+ print "2. Testing query of service information..."
+ print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
+ print " Query done."
+ print "3. Testing query of own service..."
+ print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
+ print " Query done."
+ print "4. Testing unregister of service information..."
+ r.unregisterService(info)
+ print " Unregister done."
+ r.close()
diff --git a/sys/src/cmd/hg/hgext/zeroconf/__init__.py b/sys/src/cmd/hg/hgext/zeroconf/__init__.py
new file mode 100644
index 000000000..a57bbf593
--- /dev/null
+++ b/sys/src/cmd/hg/hgext/zeroconf/__init__.py
@@ -0,0 +1,159 @@
+# zeroconf.py - zeroconf support for Mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''discover and advertise repositories on the local network
+
+Zeroconf enabled repositories will be announced in a network without
+the need to configure a server or a service. They can be discovered
+without knowing their actual IP address.
+
+To allow other people to discover your repository, run "hg serve"
+in your repository::
+
+ $ cd test
+ $ hg serve
+
+You can discover zeroconf enabled repositories by running "hg paths"::
+
+ $ hg paths
+ zc-test = http://example.com:8000/test
+'''
+
+import Zeroconf, socket, time, os
+from mercurial import ui
+from mercurial import extensions
+from mercurial.hgweb import hgweb_mod
+from mercurial.hgweb import hgwebdir_mod
+
+# publish
+
+server = None
+localip = None
+
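+# Note (added for clarity): connect() on a UDP socket performs no
+# handshake; it merely records the peer and makes the kernel pick an
+# outbound interface, which getsockname() then reports.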
+def getip():
+ # finds external-facing interface without sending any packets (Linux)
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.connect(('1.0.0.1', 0))
+ ip = s.getsockname()[0]
+ return ip
+ except:
+ pass
+
+ # Generic method, sometimes gives useless results
+ try:
+ dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
+ if not dumbip.startswith('127.') and ':' not in dumbip:
+ return dumbip
+ except socket.gaierror:
+ dumbip = '127.0.0.1'
+
+ # works elsewhere, but actually sends a packet
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.connect(('1.0.0.1', 1))
+ ip = s.getsockname()[0]
+ return ip
+ except:
+ pass
+
+ return dumbip
+
+def publish(name, desc, path, port):
+ global server, localip
+ if not server:
+ ip = getip()
+ if ip.startswith('127.'):
+ # if we have no internet connection, this can happen.
+ return
+ localip = socket.inet_aton(ip)
+ server = Zeroconf.Zeroconf(ip)
+
+ hostname = socket.gethostname().split('.')[0]
+ host = hostname + ".local"
+ name = "%s-%s" % (hostname, name)
+
+ # advertise to browsers
+ svc = Zeroconf.ServiceInfo('_http._tcp.local.',
+ name + '._http._tcp.local.',
+ server = host,
+ port = port,
+ properties = {'description': desc,
+ 'path': "/" + path},
+ address = localip, weight = 0, priority = 0)
+ server.registerService(svc)
+
+ # advertise to Mercurial clients
+ svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
+ name + '._hg._tcp.local.',
+ server = host,
+ port = port,
+ properties = {'description': desc,
+ 'path': "/" + path},
+ address = localip, weight = 0, priority = 0)
+ server.registerService(svc)
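+# Illustrative call (hypothetical values): publish('test', 'my repo',
+# 'test', 8000) announces the repository as '<hostname>-test' under both
+# the _http._tcp and _hg._tcp service types on the local network.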
+
+class hgwebzc(hgweb_mod.hgweb):
+ def __init__(self, repo, name=None):
+ super(hgwebzc, self).__init__(repo, name)
+ name = self.reponame or os.path.basename(repo.root)
+ desc = self.repo.ui.config("web", "description", name)
+ publish(name, desc, name, int(repo.ui.config("web", "port", 8000)))
+
+class hgwebdirzc(hgwebdir_mod.hgwebdir):
+ def run(self):
+ for r, p in self.repos:
+ u = self.ui.copy()
+ u.readconfig(os.path.join(p, '.hg', 'hgrc'))
+ n = os.path.basename(r)
+ publish(n, "hgweb", p, int(u.config("web", "port", 8000)))
+ return super(hgwebdirzc, self).run()
+
+# listen
+
+class listener(object):
+ def __init__(self):
+ self.found = {}
+ def removeService(self, server, type, name):
+ if repr(name) in self.found:
+ del self.found[repr(name)]
+ def addService(self, server, type, name):
+ self.found[repr(name)] = server.getServiceInfo(type, name)
+
+def getzcpaths():
+ ip = getip()
+ if ip.startswith('127.'):
+ return
+ server = Zeroconf.Zeroconf(ip)
+ l = listener()
+ Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
+ time.sleep(1)
+ server.close()
+ for v in l.found.values():
+ n = v.name[:v.name.index('.')]
+ n.replace(" ", "-")
+ u = "http://%s:%s%s" % (socket.inet_ntoa(v.address), v.port,
+ v.properties.get("path", "/"))
+ yield "zc-" + n, u
+
+def config(orig, self, section, key, default=None, untrusted=False):
+ if section == "paths" and key.startswith("zc-"):
+ for n, p in getzcpaths():
+ if n == key:
+ return p
+ return orig(self, section, key, default, untrusted)
+
+def configitems(orig, self, section, untrusted=False):
+ r = orig(self, section, untrusted)
+ if section == "paths":
+ r += getzcpaths()
+ return r
+
+extensions.wrapfunction(ui.ui, 'config', config)
+extensions.wrapfunction(ui.ui, 'configitems', configitems)
+hgweb_mod.hgweb = hgwebzc
+hgwebdir_mod.hgwebdir = hgwebdirzc