        if message_callback is None:
            if message is not None:
                if isinstance(message, str):
                    message = message.decode(get_user_encoding())
                    message = message.decode(bzrlib.user_encoding)
                message_callback = lambda x: message
            else:
                raise BzrError("The message or message_callback keyword"
                               " parameter is required for commit().")
        self.bound_branch = None
        self.any_entries_changed = False
        self.any_entries_deleted = False
        if exclude is not None:
            self.exclude = sorted(
                minimum_path_selection(exclude))
        else:
            self.exclude = []
        self.local = local
        self.master_branch = None
        self.master_locked = False
        self.recursive = recursive
        self.rev_id = None
        # self.specific_files is None to indicate no filter, or any iterable to
        # indicate a filter - [] means no files at all, as per iter_changes.
        if specific_files is not None:
            self.specific_files = sorted(
                minimum_path_selection(specific_files))
        else:
            self.specific_files = None
        self.specific_file_ids = None
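        # Illustrative note, not part of the original source: per the comment
        # above, the three cases behave differently downstream --
        #   specific_files=None     -> no filter, commit everything changed
        #   specific_files=[]       -> commit no files at all
        #   specific_files=['a/b']  -> commit only changes under that path
        # (the example path is hypothetical).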
        self.allow_pointless = allow_pointless
        self.revprops = revprops
        self.message_callback = message_callback
        self.timestamp = timestamp
        self.timezone = timezone
        self.committer = committer
        self.strict = strict
        self.verbose = verbose
        # accumulates an inventory delta to the basis entry, so we can make
        # just the necessary updates to the workingtree's cached basis.
        self._basis_delta = []

        self.work_tree.lock_write()
        operation.add_cleanup(self.work_tree.unlock)
        self.parents = self.work_tree.get_parent_ids()
        # We can use record_iter_changes IFF iter_changes is compatible with
        # the command line parameters, and the repository has fast delta
        # generation. See bug 347649.
        self.use_record_iter_changes = (
            not self.branch.repository._format.supports_tree_reference and
            (self.branch.repository._format.fast_deltas or
             len(self.parents) < 2))
        self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
        operation.add_cleanup(self.pb.finished)
        self.basis_revid = self.work_tree.last_revision()
        self.basis_tree = self.work_tree.basis_tree()
        self.basis_tree.lock_read()
        operation.add_cleanup(self.basis_tree.unlock)
        # Cannot commit with conflicts present.
        if len(self.work_tree.conflicts()) > 0:
            raise ConflictsInTree

        # Setup the bound branch variables as needed.
        self._check_bound_branch(operation, possible_master_transports)

        # Check that the working tree is up to date
        old_revno, new_revno = self._check_out_of_date_tree()

        # Complete configuration setup
        if reporter is not None:
            self.reporter = reporter
        elif self.reporter is None:
            self.reporter = self._select_reporter()
        if self.config is None:
            self.config = self.branch.get_config()

        self._set_specific_file_ids()

        # Setup the progress bar. As the number of files that need to be
        # committed is unknown, progress is reported as stages.
        # We keep track of entries separately though and include that
        # information in the progress bar during the relevant stages.
        self.pb_stage_name = ""
        self.pb_stage_count = 0
        self.pb_stage_total = 5
        if self.bound_branch:
            self.pb_stage_total += 1
        self.pb.show_pct = False
        self.pb.show_spinner = False
        self.pb.show_eta = False
        self.pb.show_count = True
        self.pb.show_bar = True
        self._gather_parents()
        # After a merge, a selected file commit is not supported.
        # See 'bzr help merge' for an explanation as to why.
        if len(self.parents) > 1 and self.specific_files is not None:
            raise errors.CannotCommitSelectedFileMerge(self.specific_files)
        # Excludes are a form of selected file commit.
        if len(self.parents) > 1 and self.exclude:
            raise errors.CannotCommitSelectedFileMerge(self.exclude)

        # Collect the changes
        self._set_progress_stage("Collecting changes", counter=True)
        self.builder = self.branch.get_commit_builder(self.parents,
            self.config, timestamp, timezone, committer, self.revprops, rev_id)

        self.builder.will_record_deletes()
        # find the location being committed to
        if self.bound_branch:
            master_location = self.master_branch.base
        else:
            master_location = self.branch.base

        # report the start of the commit
        self.reporter.started(new_revno, self.rev_id, master_location)
        self._update_builder_with_changes()
        self._check_pointless()

        # TODO: Now the new inventory is known, check for conflicts.
        # ADHB 2006-08-08: If this is done, populate_new_inv should not add
        # weave lines, because nothing should be recorded until it is known
        # that commit will succeed.
        self._set_progress_stage("Saving data locally")
        self.builder.finish_inventory()

        # Prompt the user for a commit message if none provided
        message = message_callback(self)
        self.message = message

        try:
            # Add revision data to the local branch
            self.rev_id = self.builder.commit(self.message)
        except Exception, e:
            mutter("aborting commit write group because of exception:")
            trace.log_exception_quietly()
            note("aborting commit write group: %r" % (e,))
            self.builder.abort()
            raise
        self._process_pre_hooks(old_revno, new_revno)

        # Upload revision data to the master.
        # this will propagate merged revisions too if needed.
        if self.bound_branch:
            self._set_progress_stage("Uploading data to master branch")
            # 'commit' to the master first so a timeout here causes the
            # local branch to be out of date
            self.master_branch.import_last_revision_info(
                self.branch.repository, new_revno, self.rev_id)

        # and now do the commit locally.
        self.branch.set_last_revision_info(new_revno, self.rev_id)

        # Make the working tree be up to date with the branch. This
        # includes automatic changes scheduled to be made to the tree, such
        # as updating its basis and unversioning paths that were missing.
        self.work_tree.unversion(self.deleted_ids)
        self._set_progress_stage("Updating the working tree")
        self.work_tree.update_basis_by_delta(self.rev_id,
            self.builder.get_basis_delta())
        self.reporter.completed(new_revno, self.rev_id)
        self._process_post_hooks(old_revno, new_revno)
        # Cannot commit with conflicts present.
        if len(self.work_tree.conflicts()) > 0:
            raise ConflictsInTree

        # Setup the bound branch variables as needed.
        self._check_bound_branch()

        # Check that the working tree is up to date
        old_revno, new_revno = self._check_out_of_date_tree()

        # Complete configuration setup
        if reporter is not None:
            self.reporter = reporter
        elif self.reporter is None:
            self.reporter = self._select_reporter()
        if self.config is None:
            self.config = self.branch.get_config()

        # If provided, ensure the specified files are versioned
        if self.specific_files is not None:
            # Note: This routine is being called because it raises
            # PathNotVersionedError as a side effect of finding the IDs. We
            # later use the ids we found as input to the working tree
            # inventory iterator, so we only consider those ids rather than
            # examining the whole tree again.
            # XXX: Dont we have filter_unversioned to do this more
            self.specific_file_ids = tree.find_ids_across_trees(
                specific_files, [self.basis_tree, self.work_tree])
        # Setup the progress bar. As the number of files that need to be
        # committed is unknown, progress is reported as stages.
        # We keep track of entries separately though and include that
        # information in the progress bar during the relevant stages.
        self.pb_stage_name = ""
        self.pb_stage_count = 0
        self.pb_stage_total = 5
        if self.bound_branch:
            self.pb_stage_total += 1
        self.pb.show_pct = False
        self.pb.show_spinner = False
        self.pb.show_eta = False
        self.pb.show_count = True
        self.pb.show_bar = True

        # After a merge, a selected file commit is not supported.
        # See 'bzr help merge' for an explanation as to why.
        self.basis_inv = self.basis_tree.inventory
        self._gather_parents()
        if len(self.parents) > 1 and self.specific_files:
            raise errors.CannotCommitSelectedFileMerge(self.specific_files)

        # Collect the changes
        self._set_progress_stage("Collecting changes",
                                 entries_title="Directory")
        self.builder = self.branch.get_commit_builder(self.parents,
            self.config, timestamp, timezone, committer, revprops, rev_id)

        # find the location being committed to
        if self.bound_branch:
            master_location = self.master_branch.base
        else:
            master_location = self.branch.base

        # report the start of the commit
        self.reporter.started(new_revno, self.rev_id, master_location)
        self._update_builder_with_changes()
        self._report_and_accumulate_deletes()
        self._check_pointless()

        # TODO: Now the new inventory is known, check for conflicts.
        # ADHB 2006-08-08: If this is done, populate_new_inv should not add
        # weave lines, because nothing should be recorded until it is known
        # that commit will succeed.
        self._set_progress_stage("Saving data locally")
        self.builder.finish_inventory()

        # Prompt the user for a commit message if none provided
        message = message_callback(self)
        assert isinstance(message, unicode), type(message)
        self.message = message
        self._escape_commit_message()

        # Add revision data to the local branch
        self.rev_id = self.builder.commit(self.message)

        self._process_pre_hooks(old_revno, new_revno)

        # Upload revision data to the master.
        # this will propagate merged revisions too if needed.
        if self.bound_branch:
            self._set_progress_stage("Uploading data to master branch")
            self.master_branch.repository.fetch(self.branch.repository,
                                                revision_id=self.rev_id)
            # now the master has the revision data
            # 'commit' to the master first so a timeout here causes the
            # local branch to be out of date
            self.master_branch.set_last_revision_info(new_revno,
                                                      self.rev_id)

        # and now do the commit locally.
        self.branch.set_last_revision_info(new_revno, self.rev_id)

        # Make the working tree up to date with the branch
        self._set_progress_stage("Updating the working tree")
        self.work_tree.update_basis_by_delta(self.rev_id,
            self.builder.get_basis_delta())
        self.reporter.completed(new_revno, self.rev_id)
        self._process_post_hooks(old_revno, new_revno)
        return self.rev_id

    def _select_reporter(self):
                     old_revno, old_revid, new_revno, self.rev_id,
                     tree_delta, future_tree)
    def _cleanup(self):
        """Cleanup any open locks, progress bars etc."""
        cleanups = [self._cleanup_bound_branch,
                    self.basis_tree.unlock,
                    self.work_tree.unlock,
                    self.pb.finished]
        found_exception = None
        for cleanup in cleanups:
            try:
                cleanup()
            # we want every cleanup to run no matter what.
            # so we have a catchall here, but we will raise the
            # last encountered exception up the stack: and
            # typically this will be useful enough.
            except Exception, e:
                found_exception = e
        if found_exception is not None:
            # don't do a plain raise, because the last exception may have been
            # trashed, e is our sure-to-work exception even though it loses the
            # full traceback. XXX: RBC 20060421 perhaps we could check the
            # exc_info and if its the same one do a plain raise otherwise
            # 'raise e' as we do now.
            raise found_exception
    def _cleanup_bound_branch(self):
        """Executed at the end of a try/finally to cleanup a bound branch.

        If the branch wasn't bound, this is a no-op.
        If it was, it resets self.branch to the local branch, instead
        of the master branch.
        """
        if not self.bound_branch:
            return
        if self.master_locked:
            self.master_branch.unlock()
    def _escape_commit_message(self):
        """Replace xml-incompatible control characters."""
        # FIXME: RBC 20060419 this should be done by the revision
        # serialiser not by commit. Then we can also add an unescaper
        # in the deserializer and start roundtripping revision messages
        # precisely. See repository_implementations/test_repository.py

        # Python strings can include characters that can't be
        # represented in well-formed XML; escape characters that
        # aren't listed in the XML specification
        # (http://www.w3.org/TR/REC-xml/#NT-Char).
        self.message, escape_count = re.subn(
            u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
            lambda match: match.group(0).encode('unicode_escape'),
            self.message)
        if escape_count:
            self.reporter.escaped(escape_count, self.message)
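        # Illustrative sketch, not part of the original source: the
        # substitution above rewrites XML-forbidden control characters with
        # Python's 'unicode_escape' codec. For a hypothetical message,
        #
        #     re.subn(u'[^\x09\x0A\x0D\u0020-\uD7FF\uE000-\uFFFD]+',
        #             lambda m: m.group(0).encode('unicode_escape'),
        #             u'fix form feed\x0cin parser')
        #
        # returns (u'fix form feed\\x0cin parser', 1), so the stored message
        # contains only XML-safe characters and escape_count records one
        # replacement.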
    def _gather_parents(self):
        """Record the parents of a merge for merge detection."""
        # TODO: Make sure that this list doesn't contain duplicate
        # entries and the order is preserved when doing this.
        if self.use_record_iter_changes:
            return
        self.basis_inv = self.basis_tree.inventory
        self.parents = self.work_tree.get_parent_ids()
        self.parent_invs = [self.basis_inv]
        for revision in self.parents[1:]:
            if self.branch.repository.has_revision(revision):
    def _update_builder_with_changes(self):
        """Update the commit builder with the data about what has changed.
        """
        exclude = self.exclude
        # Build the revision inventory.
        #
        # This starts by creating a new empty inventory. Depending on
        # which files are selected for commit, and what is present in the
        # current tree, the new inventory is populated. inventory entries
        # which are candidates for modification have their revision set to
        # None; inventory entries that are carried over untouched have their
        # revision set to their prior value.
        #
        # ESEPARATIONOFCONCERNS: this function is diffing and using the diff
        # results to create a new inventory at the same time, which results
        # in bugs like #46635. Any reason not to use/enhance Tree.changes_from?
        specific_files = self.specific_files
        mutter("Selecting files for commit with filter %s", specific_files)

        if self.use_record_iter_changes:
            iter_changes = self.work_tree.iter_changes(self.basis_tree,
                specific_files=specific_files)
            iter_changes = self._filter_iter_changes(iter_changes)
            for file_id, path, fs_hash in self.builder.record_iter_changes(
                self.work_tree, self.basis_revid, iter_changes):
                self.work_tree._observed_sha1(file_id, path, fs_hash)
        else:
            # Build the new inventory
            self._populate_from_inventory()
            self._record_unselected()
            self._report_and_accumulate_deletes()
    def _filter_iter_changes(self, iter_changes):
        """Process iter_changes.

        This method reports on the changes in iter_changes to the user, and
        converts 'missing' entries in the iter_changes iterator to 'deleted'
        entries. 'missing' entries have their

        :param iter_changes: An iter_changes to process.
        :return: A generator of changes.
        """
        reporter = self.reporter
        report_changes = reporter.is_verbose()
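        # Illustrative note, not part of the original source: each change is
        # assumed to follow the iter_changes tuple layout of
        #   (file_id, (old_path, new_path), changed_content,
        #    (old_versioned, new_versioned), (old_parent_id, new_parent_id),
        #    (old_name, new_name), (old_kind, new_kind),
        #    (old_executable, new_executable))
        # which is why the loop below reads change[1][0]/change[1][1] for the
        # paths, change[3][1] for the new versioned flag and change[6][1] for
        # the new kind.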
        deleted_ids = []
        for change in iter_changes:
            if report_changes:
                old_path = change[1][0]
                new_path = change[1][1]
                versioned = change[3][1]
            kind = change[6][1]
            versioned = change[3][1]
            if kind is None and versioned:
                if report_changes:
                    reporter.missing(new_path)
                deleted_ids.append(change[0])
                # Reset the new path (None) and new versioned flag (False)
                change = (change[0], (change[1][0], None), change[2],
                    (change[3][0], False)) + change[4:]
            elif kind == 'tree-reference':
                if self.recursive == 'down':
                    self._commit_nested_tree(change[0], change[1][1])
            if change[3][0] or change[3][1]:
                yield change
                if report_changes:
                    if new_path is None:
                        reporter.deleted(old_path)
                    elif old_path is None:
                        reporter.snapshot_change('added', new_path)
                    elif old_path != new_path:
                        reporter.renamed('renamed', old_path, new_path)
                    else:
                        if (new_path or
                            self.work_tree.branch.repository._format.rich_root_data):
                            # Don't report on changes to '' in non rich root
                            # repositories.
                            reporter.snapshot_change('modified', new_path)
                        self._next_progress_entry()
        # Unversion IDs that were found to be deleted
        self.deleted_ids = deleted_ids
    def _record_unselected(self):
        # Build the new inventory
        self._populate_from_inventory(specific_files)

        # If specific files are selected, then all un-selected files must be
        # recorded in their previous state. For more details, see
        # https://lists.ubuntu.com/archives/bazaar/2007q3/028476.html.
        if self.specific_files or self.exclude:
            specific_files = self.specific_files or []
            for path, old_ie in self.basis_inv.iter_entries():
                if old_ie.file_id in self.builder.new_inventory:
                    # already added - skip.
                    continue
                if (is_inside_any(specific_files, path)
                    and not is_inside_any(self.exclude, path)):
                    # was inside the selected path, and not excluded - if not
                    # present it has been deleted so skip.
                    continue
                if is_inside_any(specific_files, path):
                    # was inside the selected path, if not present it has been
                    # deleted so skip.
                    continue
                # From here down it was either not selected, or was excluded:
                # We preserve the entry unaltered.
                if old_ie.kind == 'directory':
                    self._next_progress_entry()
                # not in final inv yet, was not in the selected files, so is an
                # entry to be preserved unaltered.
                ie = old_ie.copy()
                # Note: specific file commits after a merge are currently
                # prohibited. This test is for sanity/safety in case it's
                # required after that changes.
                if len(self.parents) > 1:
                    ie.revision = None
                self.builder.record_entry_contents(ie, self.parent_invs, path,
                    self.basis_tree, None)
                delta, version_recorded = self.builder.record_entry_contents(
                    ie, self.parent_invs, path, self.basis_tree, None)
                if version_recorded:
                    self.any_entries_changed = True
                if delta: self._basis_delta.append(delta)
    def _report_and_accumulate_deletes(self):
        if (isinstance(self.basis_inv, Inventory)
            and isinstance(self.builder.new_inventory, Inventory)):
            # the older Inventory classes provide a _byid dict, and building a
            # set from the keys of this dict is substantially faster than even
            # getting a set of ids from the inventory
            #
            # <lifeless> set(dict) is roughly the same speed as
            # set(iter(dict)) and both are significantly slower than
            # set(dict.keys())
            deleted_ids = set(self.basis_inv._byid.keys()) - \
                set(self.builder.new_inventory._byid.keys())
        else:
            deleted_ids = set(self.basis_inv) - set(self.builder.new_inventory)
        # XXX: Could the list of deleted paths and ids be instead taken from
        # _populate_from_inventory?
        deleted_ids = set(self.basis_inv._byid.keys()) - \
            set(self.builder.new_inventory._byid.keys())
        if deleted_ids:
            self.any_entries_deleted = True
            deleted = [(self.basis_tree.id2path(file_id), file_id)