Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- # HG changeset patch
- # User Durham Goode <durham@fb.com>
- # Date 1399499314 25200
- # Wed May 07 14:48:34 2014 -0700
- # Node ID 9999b5d32f4ebe1cbd7f5313b0cf53bfe208f3f6
- # Parent d19164a018a175cda640066a1f32651834e4f011
- Phase cache in transaction
- diff --git a/mercurial/commands.py b/mercurial/commands.py
- --- a/mercurial/commands.py
- +++ b/mercurial/commands.py
- @@ -4478,8 +4478,10 @@
- ctx = repo[r]
- ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
- else:
- - lock = repo.lock()
- + lock = tr = None
- try:
- + lock = repo.lock()
- + tr = repo.transaction("phase")
- # set phase
- if not revs:
- raise util.Abort(_('empty revision set'))
- @@ -4488,8 +4490,9 @@
- phases.advanceboundary(repo, targetphase, nodes)
- if opts['force']:
- phases.retractboundary(repo, targetphase, nodes)
- + tr.close()
- finally:
- - lock.release()
- + release(tr, lock)
- # moving revision from public to draft may hide them
- # We have to check result on an unfiltered repository
- unfi = repo.unfiltered()
- diff --git a/mercurial/exchange.py b/mercurial/exchange.py
- --- a/mercurial/exchange.py
- +++ b/mercurial/exchange.py
- @@ -377,34 +377,40 @@
- # courtesy to publish changesets possibly locally draft
- # on the remote.
- remotephases = {'publishing': 'True'}
- - if not remotephases: # old server or public only reply from non-publishing
- - _localphasemove(pushop, cheads)
- - # don't push any phase data as there is nothing to push
- - else:
- - ana = phases.analyzeremotephases(pushop.repo, cheads,
- - remotephases)
- - pheads, droots = ana
- - ### Apply remote phase on local
- - if remotephases.get('publishing', False):
- +
- + tr = pushop.repo.transaction("phases")
- + try:
- + if not remotephases: # old server or public only reply from non-publishing
- _localphasemove(pushop, cheads)
- - else: # publish = False
- - _localphasemove(pushop, pheads)
- - _localphasemove(pushop, cheads, phases.draft)
- - ### Apply local phase on remote
- + # don't push any phase data as there is nothing to push
- + else:
- + ana = phases.analyzeremotephases(pushop.repo, cheads,
- + remotephases)
- + pheads, droots = ana
- + ### Apply remote phase on local
- + if remotephases.get('publishing', False):
- + _localphasemove(pushop, cheads)
- + else: # publish = False
- + _localphasemove(pushop, pheads)
- + _localphasemove(pushop, cheads, phases.draft)
- + ### Apply local phase on remote
- - # Get the list of all revs draft on remote by public here.
- - # XXX Beware that revset break if droots is not strictly
- - # XXX root we may want to ensure it is but it is costly
- - outdated = unfi.set('heads((%ln::%ln) and public())',
- - droots, cheads)
- - for newremotehead in outdated:
- - r = pushop.remote.pushkey('phases',
- - newremotehead.hex(),
- - str(phases.draft),
- - str(phases.public))
- - if not r:
- - pushop.ui.warn(_('updating %s to public failed!\n')
- - % newremotehead)
- + # Get the list of all revs draft on remote by public here.
- + # XXX Beware that revset break if droots is not strictly
- + # XXX root we may want to ensure it is but it is costly
- + outdated = unfi.set('heads((%ln::%ln) and public())',
- + droots, cheads)
- + for newremotehead in outdated:
- + r = pushop.remote.pushkey('phases',
- + newremotehead.hex(),
- + str(phases.draft),
- + str(phases.public))
- + if not r:
- + pushop.ui.warn(_('updating %s to public failed!\n')
- + % newremotehead)
- + tr.close()
- + finally:
- + tr.release()
- def _localphasemove(pushop, nodes, phase=phases.public):
- """move <nodes> to <phase> in the local source repo"""
- diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
- --- a/mercurial/localrepo.py
- +++ b/mercurial/localrepo.py
- @@ -862,6 +862,8 @@
- def onclose():
- self.store.write(tr)
- + if hasunfilteredcache(self, '_phasecache'):
- + self._phasecache.write(tr)
- self._writejournal(desc)
- renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
- @@ -876,10 +878,12 @@
- def _journalfiles(self):
- return ((self.svfs, 'journal'),
- + (self.svfs, 'journal.backupfiles'),
- (self.vfs, 'journal.dirstate'),
- (self.vfs, 'journal.branch'),
- (self.vfs, 'journal.desc'),
- (self.vfs, 'journal.bookmarks'),
- + (self.svfs, 'journal.fncache'),
- (self.svfs, 'journal.phaseroots'))
- def undofiles(self):
- @@ -894,8 +898,6 @@
- "%d\n%s\n" % (len(self), desc))
- self.opener.write("journal.bookmarks",
- self.opener.tryread("bookmarks"))
- - self.sopener.write("journal.phaseroots",
- - self.sopener.tryread("phaseroots"))
- def recover(self):
- lock = self.lock()
- @@ -956,39 +958,43 @@
- if dryrun:
- return 0
- - parents = self.dirstate.parents()
- - self.destroying()
- - transaction.rollback(self.sopener, 'undo', ui.warn)
- - if self.vfs.exists('undo.bookmarks'):
- - self.vfs.rename('undo.bookmarks', 'bookmarks')
- - if self.svfs.exists('undo.phaseroots'):
- - self.svfs.rename('undo.phaseroots', 'phaseroots')
- - self.invalidate()
- + tr = self.transaction("rollback")
- + try:
- + parents = self.dirstate.parents()
- + self.destroying(tr)
- + transaction.rollback(self.sopener, 'undo', ui.warn)
- + if self.vfs.exists('undo.bookmarks'):
- + self.vfs.rename('undo.bookmarks', 'bookmarks')
- + self.invalidate()
- - parentgone = (parents[0] not in self.changelog.nodemap or
- - parents[1] not in self.changelog.nodemap)
- - if parentgone:
- - self.vfs.rename('undo.dirstate', 'dirstate')
- - try:
- - branch = self.opener.read('undo.branch')
- - self.dirstate.setbranch(encoding.tolocal(branch))
- - except IOError:
- - ui.warn(_('named branch could not be reset: '
- - 'current branch is still \'%s\'\n')
- - % self.dirstate.branch())
- + parentgone = (parents[0] not in self.changelog.nodemap or
- + parents[1] not in self.changelog.nodemap)
- + if parentgone:
- + self.vfs.rename('undo.dirstate', 'dirstate')
- + try:
- + branch = self.opener.read('undo.branch')
- + self.dirstate.setbranch(encoding.tolocal(branch))
- + except IOError:
- + ui.warn(_('named branch could not be reset: '
- + 'current branch is still \'%s\'\n')
- + % self.dirstate.branch())
- - self.dirstate.invalidate()
- - parents = tuple([p.rev() for p in self.parents()])
- - if len(parents) > 1:
- - ui.status(_('working directory now based on '
- - 'revisions %d and %d\n') % parents)
- - else:
- - ui.status(_('working directory now based on '
- - 'revision %d\n') % parents)
- - # TODO: if we know which new heads may result from this rollback, pass
- - # them to destroy(), which will prevent the branchhead cache from being
- - # invalidated.
- - self.destroyed()
- + self.dirstate.invalidate()
- + parents = tuple([p.rev() for p in self.parents()])
- + if len(parents) > 1:
- + ui.status(_('working directory now based on '
- + 'revisions %d and %d\n') % parents)
- + else:
- + ui.status(_('working directory now based on '
- + 'revision %d\n') % parents)
- + # TODO: if we know which new heads may result from this rollback,
- + # pass them to destroy(), which will prevent the branchhead cache
- + # from being invalidated.
- + self.destroyed(tr)
- + tr.close()
- + finally:
- + tr.release()
- +
- return 0
- def invalidatecaches(self):
- @@ -1079,8 +1085,6 @@
- return l
- def unlock():
- - if hasunfilteredcache(self, '_phasecache'):
- - self._phasecache.write()
- for k, ce in self._filecache.items():
- if k == 'dirstate' or k not in self.__dict__:
- continue
- @@ -1442,7 +1446,7 @@
- lock.release()
- @unfilteredmethod
- - def destroying(self):
- + def destroying(self, tr):
- '''Inform the repository that nodes are about to be destroyed.
- Intended for use by strip and rollback, so there's a common
- place for anything that has to be done before destroying history.
- @@ -1457,10 +1461,10 @@
- # dirty after committing. Then when we strip, the repo is invalidated,
- # causing those changes to disappear.
- if '_phasecache' in vars(self):
- - self._phasecache.write()
- + self._phasecache.write(tr)
- @unfilteredmethod
- - def destroyed(self):
- + def destroyed(self, tr):
- '''Inform the repository that nodes have been destroyed.
- Intended for use by strip and rollback, so there's a common
- place for anything that has to be done after destroying history.
- @@ -1474,7 +1478,7 @@
- # causing it to reload next time it is accessed, or simply filter
- # the removed nodes now and write the updated cache.
- self._phasecache.filterunknown(self)
- - self._phasecache.write()
- + self._phasecache.write(tr)
- # update the 'served' branch cache to help read only server process
- # Thanks to branchcache collaboration this is done from the nearest
- diff --git a/mercurial/phases.py b/mercurial/phases.py
- --- a/mercurial/phases.py
- +++ b/mercurial/phases.py
- @@ -191,9 +191,11 @@
- self._phaserevs = self.getphaserevs(repo, rebuild=True)
- return self._phaserevs[rev]
- - def write(self):
- + def write(self, tr):
- if not self.dirty:
- return
- +
- + tr.addbackup('phaseroots')
- f = self.opener('phaseroots', 'w', atomictemp=True)
- try:
- for phase, roots in enumerate(self.phaseroots):
- @@ -212,6 +214,10 @@
- # Be careful to preserve shallow-copied values: do not update
- # phaseroots values, replace them.
- + tr = repo._transref and repo._transref() or None
- + if not tr:
- + raise Exception("NO TRANSACTION ON ADVANCE BOUNDARY")
- +
- repo = repo.unfiltered()
- delroots = [] # set of root deleted by this path
- for phase in xrange(targetphase + 1, len(allphases)):
- @@ -236,6 +242,10 @@
- # Be careful to preserve shallow-copied values: do not update
- # phaseroots values, replace them.
- + tr = repo._transref and repo._transref() or None
- + if not tr:
- + raise Exception("NO TRANSACTION ON RETRACT BOUNDARY")
- +
- repo = repo.unfiltered()
- currentroots = self.phaseroots[targetphase]
- newroots = [n for n in nodes
- @@ -255,6 +265,7 @@
- Nothing is lost as unknown nodes only hold data for their descendants.
- """
- +
- filtered = False
- nodemap = repo.changelog.nodemap # to filter unknown nodes
- for phase, nodes in enumerate(self.phaseroots):
- diff --git a/mercurial/repair.py b/mercurial/repair.py
- --- a/mercurial/repair.py
- +++ b/mercurial/repair.py
- @@ -49,7 +49,6 @@
- def strip(ui, repo, nodelist, backup="all", topic='backup'):
- repo = repo.unfiltered()
- - repo.destroying()
- cl = repo.changelog
- # TODO handle undo of merge sets
- @@ -123,6 +122,8 @@
- mfst = repo.manifest
- tr = repo.transaction("strip")
- + repo.destroying(tr)
- +
- offset = len(tr.entries)
- try:
- @@ -139,6 +140,8 @@
- repo.sopener(file, 'a').truncate(troffset)
- if troffset == 0:
- repo.store.markremoved(file)
- +
- + repo.destroyed(tr)
- tr.close()
- except: # re-raises
- tr.abort()
- @@ -179,5 +182,3 @@
- ui.warn(_("strip failed, partial bundle stored in '%s'\n")
- % vfs.join(chgrpfile))
- raise
- -
- - repo.destroyed()
- diff --git a/mercurial/transaction.py b/mercurial/transaction.py
- --- a/mercurial/transaction.py
- +++ b/mercurial/transaction.py
- @@ -169,7 +169,7 @@
- self.backupentries.append((file, backupfile, None))
- self.backupmap[file] = len(self.backupentries) - 1
- - self.backupsfile.write("%s\0%s\0" % (file, backupfile))
- + self.backupsfile.write("%s\n" % (file))
- self.backupsfile.flush()
- @active
- @@ -297,11 +297,11 @@
- backupjournal = "%s.backupfiles" % file
- if opener.exists(backupjournal):
- fp = opener.open(backupjournal)
- - data = fp.read()
- - if len(data) > 0:
- - parts = data.split('\0')
- - for i in xrange(0, len(parts), 2):
- - f, b = parts[i:i + 1]
- - backupentries.append((f, b, None))
- + lines = fp.readlines()
- + fp.close()
- + for l in lines:
- + f = l[:-1]
- + backup = '%s.%s' % (file, f)
- + backupentries.append((f, backup, None))
- _playback(file, report, opener, entries, backupentries)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement