         if message_callback is None:
             if message is not None:
                 if isinstance(message, str):
+                    message = message.decode(get_user_encoding())
-                    message = message.decode(bzrlib.user_encoding)
                 message_callback = lambda x: message
                 raise BzrError("The message or message_callback keyword"
                     " parameter is required for commit().")
         self.bound_branch = None
-        self.any_entries_changed = False
         self.any_entries_deleted = False
+        if exclude is not None:
+            self.exclude = sorted(
+                minimum_path_selection(exclude))
         self.local = local
         self.master_branch = None
-        self.master_locked = False
         self.recursive = recursive
         self.rev_id = None
+        # self.specific_files is None to indicate no filter, or any iterable to
+        # indicate a filter - [] means no files at all, as per iter_changes.
         if specific_files is not None:
             self.specific_files = sorted(
                 minimum_path_selection(specific_files))
             self.specific_files = None
-            self.specific_file_ids = None
         self.allow_pointless = allow_pointless
-        self.revprops = revprops
         self.message_callback = message_callback
         self.timestamp = timestamp
         self.timezone = timezone
         self.committer = committer
         self.strict = strict
         self.verbose = verbose
-        # accumulates an inventory delta to the basis entry, so we can make
-        # just the necessary updates to the workingtree's cached basis.
-        self._basis_delta = []
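
Both sides of the hunk above funnel the user's path arguments through minimum_path_selection before storing them. As a rough illustration of what that helper does for the selection logic later in this diff (a sketch only, assuming the bzrlib.osutils helper this module imports; the sample paths are invented):

    from bzrlib.osutils import minimum_path_selection

    # A path that is already covered by a selected parent adds nothing,
    # so it is dropped before the selection is sorted and stored.
    selection = minimum_path_selection(['a', 'a/b', 'c/d'])
    # expected to cover the same files as ['a', 'c/d']
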
         self.work_tree.lock_write()
+        operation.add_cleanup(self.work_tree.unlock)
+        self.parents = self.work_tree.get_parent_ids()
+        # We can use record_iter_changes IFF iter_changes is compatible with
+        # the command line parameters, and the repository has fast delta
+        # generation. See bug 347649.
+        self.use_record_iter_changes = (
+            not self.branch.repository._format.supports_tree_reference and
+            (self.branch.repository._format.fast_deltas or
+             len(self.parents) < 2))
         self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
+        operation.add_cleanup(self.pb.finished)
         self.basis_revid = self.work_tree.last_revision()
         self.basis_tree = self.work_tree.basis_tree()
         self.basis_tree.lock_read()
+        operation.add_cleanup(self.basis_tree.unlock)
+        # Cannot commit with conflicts present.
+        if len(self.work_tree.conflicts()) > 0:
+            raise ConflictsInTree
+        # Setup the bound branch variables as needed.
+        self._check_bound_branch(operation, possible_master_transports)
+        # Check that the working tree is up to date
+        old_revno, new_revno = self._check_out_of_date_tree()
+        # Complete configuration setup
+        if reporter is not None:
+            self.reporter = reporter
+        elif self.reporter is None:
+            self.reporter = self._select_reporter()
+        if self.config is None:
+            self.config = self.branch.get_config()
+        self._set_specific_file_ids()
+        # Setup the progress bar. As the number of files that need to be
+        # committed is unknown, progress is reported as stages.
+        # We keep track of entries separately though and include that
+        # information in the progress bar during the relevant stages.
+        self.pb_stage_name = ""
+        self.pb_stage_count = 0
+        self.pb_stage_total = 5
+        if self.bound_branch:
+            self.pb_stage_total += 1
+        self.pb.show_pct = False
+        self.pb.show_spinner = False
+        self.pb.show_eta = False
+        self.pb.show_count = True
+        self.pb.show_bar = True
+        self._gather_parents()
+        # After a merge, a selected file commit is not supported.
+        # See 'bzr help merge' for an explanation as to why.
+        if len(self.parents) > 1 and self.specific_files is not None:
+            raise errors.CannotCommitSelectedFileMerge(self.specific_files)
+        # Excludes are a form of selected file commit.
+        if len(self.parents) > 1 and self.exclude:
+            raise errors.CannotCommitSelectedFileMerge(self.exclude)
+        # Collect the changes
+        self._set_progress_stage("Collecting changes", counter=True)
+        self.builder = self.branch.get_commit_builder(self.parents,
+            self.config, timestamp, timezone, committer, self.revprops, rev_id)
+            self.builder.will_record_deletes()
+            # find the location being committed to
+            if self.bound_branch:
+                master_location = self.master_branch.base
+                master_location = self.branch.base
+            # report the start of the commit
+            self.reporter.started(new_revno, self.rev_id, master_location)
+            self._update_builder_with_changes()
+            self._check_pointless()
+            # TODO: Now the new inventory is known, check for conflicts.
+            # ADHB 2006-08-08: If this is done, populate_new_inv should not add
+            # weave lines, because nothing should be recorded until it is known
+            # that commit will succeed.
+            self._set_progress_stage("Saving data locally")
+            self.builder.finish_inventory()
+            # Prompt the user for a commit message if none provided
+            message = message_callback(self)
+            self.message = message
+            # Add revision data to the local branch
+            self.rev_id = self.builder.commit(self.message)
+            mutter("aborting commit write group because of exception:")
+            trace.log_exception_quietly()
+            note("aborting commit write group: %r" % (e,))
+        self._process_pre_hooks(old_revno, new_revno)
+        # Upload revision data to the master.
+        # this will propagate merged revisions too if needed.
+        if self.bound_branch:
+            self._set_progress_stage("Uploading data to master branch")
+            # 'commit' to the master first so a timeout here causes the
+            # local branch to be out of date
+            self.master_branch.import_last_revision_info(
+                self.branch.repository, new_revno, self.rev_id)
+        # and now do the commit locally.
+        self.branch.set_last_revision_info(new_revno, self.rev_id)
+        # Make the working tree be up to date with the branch. This
+        # includes automatic changes scheduled to be made to the tree, such
+        # as updating its basis and unversioning paths that were missing.
+        self.work_tree.unversion(self.deleted_ids)
+        self._set_progress_stage("Updating the working tree")
+        self.work_tree.update_basis_by_delta(self.rev_id,
+            self.builder.get_basis_delta())
+        self.reporter.completed(new_revno, self.rev_id)
+        self._process_post_hooks(old_revno, new_revno)
-        # Cannot commit with conflicts present.
-        if len(self.work_tree.conflicts()) > 0:
-            raise ConflictsInTree
-        # Setup the bound branch variables as needed.
-        self._check_bound_branch()
-        # Check that the working tree is up to date
-        old_revno, new_revno = self._check_out_of_date_tree()
-        # Complete configuration setup
-        if reporter is not None:
-            self.reporter = reporter
-        elif self.reporter is None:
-            self.reporter = self._select_reporter()
-        if self.config is None:
-            self.config = self.branch.get_config()
-        # If provided, ensure the specified files are versioned
-        if self.specific_files is not None:
-            # Note: This routine is being called because it raises
-            # PathNotVersionedError as a side effect of finding the IDs. We
-            # later use the ids we found as input to the working tree
-            # inventory iterator, so we only consider those ids rather than
-            # examining the whole tree again.
-            # XXX: Don't we have filter_unversioned to do this more
-            self.specific_file_ids = tree.find_ids_across_trees(
-                specific_files, [self.basis_tree, self.work_tree])
-        # Setup the progress bar. As the number of files that need to be
-        # committed is unknown, progress is reported as stages.
-        # We keep track of entries separately though and include that
-        # information in the progress bar during the relevant stages.
-        self.pb_stage_name = ""
-        self.pb_stage_count = 0
-        self.pb_stage_total = 5
-        if self.bound_branch:
-            self.pb_stage_total += 1
-        self.pb.show_pct = False
-        self.pb.show_spinner = False
-        self.pb.show_eta = False
-        self.pb.show_count = True
-        self.pb.show_bar = True
-        # After a merge, a selected file commit is not supported.
-        # See 'bzr help merge' for an explanation as to why.
-        self.basis_inv = self.basis_tree.inventory
-        self._gather_parents()
-        if len(self.parents) > 1 and self.specific_files:
-            raise errors.CannotCommitSelectedFileMerge(self.specific_files)
-        # Collect the changes
-        self._set_progress_stage("Collecting changes",
-            entries_title="Directory")
-        self.builder = self.branch.get_commit_builder(self.parents,
-            self.config, timestamp, timezone, committer, revprops, rev_id)
-            # find the location being committed to
-            if self.bound_branch:
-                master_location = self.master_branch.base
-                master_location = self.branch.base
-            # report the start of the commit
-            self.reporter.started(new_revno, self.rev_id, master_location)
-            self._update_builder_with_changes()
-            self._report_and_accumulate_deletes()
-            self._check_pointless()
-            # TODO: Now the new inventory is known, check for conflicts.
-            # ADHB 2006-08-08: If this is done, populate_new_inv should not add
-            # weave lines, because nothing should be recorded until it is known
-            # that commit will succeed.
-            self._set_progress_stage("Saving data locally")
-            self.builder.finish_inventory()
-            # Prompt the user for a commit message if none provided
-            message = message_callback(self)
-            self.message = message
-            self._escape_commit_message()
-            # Add revision data to the local branch
-            self.rev_id = self.builder.commit(self.message)
-        self._process_pre_hooks(old_revno, new_revno)
-        # Upload revision data to the master.
-        # this will propagate merged revisions too if needed.
-        if self.bound_branch:
-            if not self.master_branch.repository.has_same_location(
-                    self.branch.repository):
-                self._set_progress_stage("Uploading data to master branch")
-                self.master_branch.repository.fetch(self.branch.repository,
-                    revision_id=self.rev_id)
-            # now the master has the revision data
-            # 'commit' to the master first so a timeout here causes the
-            # local branch to be out of date
-            self.master_branch.set_last_revision_info(new_revno,
-        # and now do the commit locally.
-        self.branch.set_last_revision_info(new_revno, self.rev_id)
-        # Make the working tree up to date with the branch
-        self._set_progress_stage("Updating the working tree")
-        self.work_tree.update_basis_by_delta(self.rev_id,
-        self.reporter.completed(new_revno, self.rev_id)
-        self._process_post_hooks(old_revno, new_revno)
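
Both the old and the new body of _commit shown above accept either a ready-made message or a message_callback, and the callback is only invoked once the tree changes have been collected (message = message_callback(self)). A minimal sketch of the two call styles, assuming a working tree object named tree and default values for the other keywords:

    # A plain string: _commit wraps it as 'lambda x: message' internally.
    tree.commit(message=u"fix whitespace damage")

    # A callback: evaluated late, so it can look at the commit object it is
    # handed (here it simply ignores it).
    tree.commit(message_callback=lambda commit_obj: u"fix whitespace damage")
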
         return self.rev_id
     def _select_reporter(self):
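
Near the end of the new _commit body, the working tree is updated with update_basis_by_delta(self.rev_id, self.builder.get_basis_delta()). For readers unfamiliar with bzrlib inventory deltas, each delta entry is a 4-tuple of (old_path, new_path, file_id, new_entry); old_path is None for an add and new_path is None for a delete. A sketch with invented values (the entry objects, tree and revision id are placeholders, not real API objects):

    # (old_path, new_path, file_id, new_inventory_entry)
    delta = [
        (None, 'doc/README', 'readme-id', readme_entry),    # file added
        ('old.txt', None, 'old-id', None),                   # file deleted
        ('a.txt', 'b.txt', 'a-id', moved_entry),             # file renamed
    ]
    work_tree.update_basis_by_delta(new_rev_id, delta)
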
                     old_revno, old_revid, new_revno, self.rev_id,
                     tree_delta, future_tree)
-        """Cleanup any open locks, progress bars etc."""
-        cleanups = [self._cleanup_bound_branch,
-                    self.basis_tree.unlock,
-                    self.work_tree.unlock,
-        found_exception = None
-        for cleanup in cleanups:
-            # we want every cleanup to run no matter what.
-            # so we have a catchall here, but we will raise the
-            # last encountered exception up the stack: and
-            # typically this will be useful enough.
-        if found_exception is not None:
-            # don't do a plain raise, because the last exception may have been
-            # trashed, e is our sure-to-work exception even though it loses the
-            # full traceback. XXX: RBC 20060421 perhaps we could check the
-            # exc_info and if its the same one do a plain raise otherwise
-            # 'raise e' as we do now.
-    def _cleanup_bound_branch(self):
-        """Executed at the end of a try/finally to cleanup a bound branch.
-        If the branch wasn't bound, this is a no-op.
-        If it was, it resets self.branch to the local branch, instead
-        if not self.bound_branch:
-        if self.master_locked:
-            self.master_branch.unlock()
-    def _escape_commit_message(self):
-        """Replace xml-incompatible control characters."""
-        # FIXME: RBC 20060419 this should be done by the revision
-        # serialiser not by commit. Then we can also add an unescaper
-        # in the deserializer and start roundtripping revision messages
-        # precisely. See repository_implementations/test_repository.py
-        # Python strings can include characters that can't be
-        # represented in well-formed XML; escape characters that
-        # aren't listed in the XML specification
-        # (http://www.w3.org/TR/REC-xml/#NT-Char).
-        self.message, escape_count = re.subn(
-            u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
-            lambda match: match.group(0).encode('unicode_escape'),
-            self.reporter.escaped(escape_count, self.message)
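
The removed _escape_commit_message above rewrites any characters that XML 1.0 cannot represent, using re.subn with the character class shown. A small standalone sketch of that same substitution (Python 2, like the surrounding code; the message value is invented):

    import re

    message = u"bad\x0c character"   # \x0c (form feed) is not valid in XML 1.0
    escaped, count = re.subn(
        u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
        lambda match: match.group(0).encode('unicode_escape'),
        message)
    # escaped == u"bad\\x0c character", count == 1
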
     def _gather_parents(self):
         """Record the parents of a merge for merge detection."""
+        # TODO: Make sure that this list doesn't contain duplicate
-        # TODO: Make sure that this list doesn't contain duplicate
         # entries and the order is preserved when doing this.
+        if self.use_record_iter_changes:
+        self.basis_inv = self.basis_tree.inventory
-        self.parents = self.work_tree.get_parent_ids()
         self.parent_invs = [self.basis_inv]
         for revision in self.parents[1:]:
             if self.branch.repository.has_revision(revision):
     def _update_builder_with_changes(self):
         """Update the commit builder with the data about what has changed.
+        exclude = self.exclude
-        # Build the revision inventory.
-        # This starts by creating a new empty inventory. Depending on
-        # which files are selected for commit, and what is present in the
-        # current tree, the new inventory is populated. inventory entries
-        # which are candidates for modification have their revision set to
-        # None; inventory entries that are carried over untouched have their
-        # revision set to their prior value.
-        # ESEPARATIONOFCONCERNS: this function is diffing and using the diff
-        # results to create a new inventory at the same time, which results
-        # in bugs like #46635. Any reason not to use/enhance Tree.changes_from?
         specific_files = self.specific_files
         mutter("Selecting files for commit with filter %s", specific_files)
+        if self.use_record_iter_changes:
+            iter_changes = self.work_tree.iter_changes(self.basis_tree,
+                specific_files=specific_files)
+            iter_changes = self._filter_iter_changes(iter_changes)
+            for file_id, path, fs_hash in self.builder.record_iter_changes(
+                self.work_tree, self.basis_revid, iter_changes):
+                self.work_tree._observed_sha1(file_id, path, fs_hash)
+            # Build the new inventory
+            self._populate_from_inventory()
+            self._record_unselected()
+            self._report_and_accumulate_deletes()
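
Both the record_iter_changes branch above and _filter_iter_changes below index directly into the tuples yielded by Tree.iter_changes. As a reference for those indexes (this is the standard bzrlib iter_changes layout; the sample values are invented):

    # change[0]  file_id
    # change[1]  (old_path, new_path)
    # change[2]  changed_content flag
    # change[3]  (old_versioned, new_versioned)
    # change[4]  (old_parent_id, new_parent_id)
    # change[5]  (old_name, new_name)
    # change[6]  (old_kind, new_kind)
    # change[7]  (old_executable, new_executable)
    change = ('a-id', ('a.txt', 'b.txt'), True, (True, True),
              ('root-id', 'root-id'), ('a.txt', 'b.txt'),
              ('file', 'file'), (False, False))
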
+    def _filter_iter_changes(self, iter_changes):
+        """Process iter_changes.
+        This method reports on the changes in iter_changes to the user, and
+        converts 'missing' entries in the iter_changes iterator to 'deleted'
+        entries. 'missing' entries have their
+        :param iter_changes: An iter_changes to process.
+        :return: A generator of changes.
+        reporter = self.reporter
+        report_changes = reporter.is_verbose()
+        for change in iter_changes:
+                old_path = change[1][0]
+                new_path = change[1][1]
+                versioned = change[3][1]
+            versioned = change[3][1]
+            if kind is None and versioned:
+                    reporter.missing(new_path)
+                deleted_ids.append(change[0])
+                # Reset the new path (None) and new versioned flag (False)
+                change = (change[0], (change[1][0], None), change[2],
+                    (change[3][0], False)) + change[4:]
+            elif kind == 'tree-reference':
+                if self.recursive == 'down':
+                    self._commit_nested_tree(change[0], change[1][1])
+            if change[3][0] or change[3][1]:
+                        reporter.deleted(old_path)
+                    elif old_path is None:
+                        reporter.snapshot_change('added', new_path)
+                    elif old_path != new_path:
+                        reporter.renamed('renamed', old_path, new_path)
+                            self.work_tree.branch.repository._format.rich_root_data):
+                            # Don't report on changes to '' in non rich root
+                            reporter.snapshot_change('modified', new_path)
+            self._next_progress_entry()
+        # Unversion IDs that were found to be deleted
+        self.deleted_ids = deleted_ids
+    def _record_unselected(self):
-        # Build the new inventory
-        self._populate_from_inventory(specific_files)
         # If specific files are selected, then all un-selected files must be
         # recorded in their previous state. For more details, see
         # https://lists.ubuntu.com/archives/bazaar/2007q3/028476.html.
+        if self.specific_files or self.exclude:
+            specific_files = self.specific_files or []
             for path, old_ie in self.basis_inv.iter_entries():
                 if old_ie.file_id in self.builder.new_inventory:
                     # already added - skip.
+                if (is_inside_any(specific_files, path)
+                    and not is_inside_any(self.exclude, path)):
+                    # was inside the selected path, and not excluded - if not
+                    # present it has been deleted so skip.
-                if is_inside_any(specific_files, path):
-                    # was inside the selected path, if not present it has been
+                # From here down it was either not selected, or was excluded:
+                # We preserve the entry unaltered.
-                if old_ie.kind == 'directory':
-                    self._next_progress_entry()
-                # not in final inv yet, was not in the selected files, so is an
-                # entry to be preserved unaltered.
                 ie = old_ie.copy()
                 # Note: specific file commits after a merge are currently
                 # prohibited. This test is for sanity/safety in case it's
                 # required after that changes.
                 if len(self.parents) > 1:
                     ie.revision = None
+                self.builder.record_entry_contents(ie, self.parent_invs, path,
+                    self.basis_tree, None)
-                delta, version_recorded = self.builder.record_entry_contents(
-                    ie, self.parent_invs, path, self.basis_tree, None)
-                    self.any_entries_changed = True
-                if delta: self._basis_delta.append(delta)
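
The loop above decides whether to carry an entry over by testing paths with is_inside_any. A short sketch of its behaviour as used here (assuming the bzrlib.osutils helper this module imports; the paths are invented):

    from bzrlib.osutils import is_inside_any

    is_inside_any(['doc', 'src/foo.c'], 'doc/index.txt')   # True: under 'doc'
    is_inside_any(['doc', 'src/foo.c'], 'src/foo.c')       # True: the path itself
    is_inside_any(['doc'], 'docs/notes.txt')               # False: 'docs' is not 'doc'
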
     def _report_and_accumulate_deletes(self):
+        if (isinstance(self.basis_inv, Inventory)
+            and isinstance(self.builder.new_inventory, Inventory)):
+            # the older Inventory classes provide a _byid dict, and building a
+            # set from the keys of this dict is substantially faster than even
+            # getting a set of ids from the inventory
+            # <lifeless> set(dict) is roughly the same speed as
+            # set(iter(dict)) and both are significantly slower than
+            deleted_ids = set(self.basis_inv._byid.keys()) - \
+                set(self.builder.new_inventory._byid.keys())
+            deleted_ids = set(self.basis_inv) - set(self.builder.new_inventory)
-        # XXX: Could the list of deleted paths and ids be instead taken from
-        # _populate_from_inventory?
-        deleted_ids = set(self.basis_inv._byid.keys()) - \
-            set(self.builder.new_inventory._byid.keys())
             self.any_entries_deleted = True
             deleted = [(self.basis_tree.id2path(file_id), file_id)