self.branch = self.bzrdir.open_branch()
except errors.NotBranchError:
# Nothing to check here
            self.fixed_branch_history = None
            return
ui.ui_factory.note('Reconciling branch %s' % self.branch.base)
branch_reconciler = self.branch.reconcile(thorough=True)
self.fixed_branch_history = branch_reconciler.fixed_history
def _reconcile_repository(self):
self.repo = self.bzrdir.find_repository()
ui.ui_factory.note('Reconciling repository %s' %
self.repo.bzrdir.root_transport.base)
self.pb.update("Reconciling repository", 0, 1)
repo_reconciler = self.repo.reconcile(thorough=True)
self.inconsistent_parents = repo_reconciler.inconsistent_parents
self.garbage_inventories = repo_reconciler.garbage_inventories
if repo_reconciler.aborted:
            ui.ui_factory.note(
                'Reconcile aborted: revision index has inconsistent parents.')
            ui.ui_factory.note(
                'Run "bzr check" for more details.')
        else:
            ui.ui_factory.note('Reconciliation complete.')
class BranchReconciler(object):
# set_revision_history, as this will regenerate it again.
# Not really worth a whole BranchReconciler class just for this,
ui.ui_factory.note('Fixing last revision info %s => %s' % (
last_revno, len(real_history)))
self.branch.set_last_revision_info(len(real_history),
last_revision_id)
        else:
            self.fixed_history = False
ui.ui_factory.note('revision_history ok.')
class RepoReconciler(object):
"""Reconciler that reconciles a repository.
The goal of repository reconciliation is to make any derived data
consistent with the core data committed by a user. This can involve
reindexing, or removing unreferenced data if that can interfere with
queries in a given repository.
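    # Illustrative usage sketch (not part of the original source): repository
    # reconciliation is normally reached via Repository.reconcile(), roughly:
    #
    #   repo_reconciler = repo.reconcile(thorough=True)
    #   if repo_reconciler.inconsistent_parents:
    #       pass  # derived indexes disagreed with committed revision data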
# put a revision into the graph.
self._graph_revision(rev_id)
self._check_garbage_inventories()
# if there are no inconsistent_parents and
# (no garbage inventories or we are not doing a thorough check)
if (not self.inconsistent_parents and
(not self.garbage_inventories or not self.thorough)):
            ui.ui_factory.note('Inventory ok.')
            return
self.pb.update('Backing up inventory', 0, 0)
self.repo._backup_inventory()
ui.ui_factory.note('Backup inventory created.')
new_inventories = self.repo._temp_inventories()
# we have topological order of revisions and non ghost parents ready.
self._setup_steps(len(self._rev_graph))
revision_keys = [(rev_id,) for rev_id in topo_sort(self._rev_graph)]
stream = self._change_inv_parents(
self.inventory.get_record_stream(revision_keys, 'unordered', True),
            self._new_inv_parents,
            set(revision_keys))
new_inventories.insert_record_stream(stream)
        # if this worked, the set of new_inventories.keys should equal
        # self.pending
if not (set(new_inventories.keys()) ==
set([(revid,) for revid in self.pending])):
raise AssertionError()
self.pb.update('Writing weave')
self.repo._activate_new_inventory()
self.inventory = None
ui.ui_factory.note('Inventory regenerated.')
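        # The sequence above is a safe-replace protocol (summary comment
        # added for clarity; the method names are from this module): back up
        # the current inventory (_backup_inventory), stream corrected records
        # into a temporary store (_temp_inventories + insert_record_stream),
        # verify the resulting key set against self.pending, then swap the
        # new store into place (_activate_new_inventory).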
def _new_inv_parents(self, revision_key):
"""Lookup ghost-filtered parents for revision_key."""
# Use the filtered ghostless parents list:
return tuple([(revid,) for revid in self._rev_graph[revision_key[-1]]])
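        # Worked example (assumed data, not from the original source): with
        # self._rev_graph = {'rev-2': ['rev-1', 'rev-0']}, the call
        # _new_inv_parents(('rev-2',)) returns (('rev-1',), ('rev-0',)):
        # plain revision ids become 1-tuple keys, the form the
        # VersionedFiles record-stream API expects.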
def _change_inv_parents(self, stream, get_parents, all_revision_keys):
"""Adapt a record stream to reconcile the parents."""
for record in stream:
wanted_parents = get_parents(record.key)
if wanted_parents and wanted_parents[0] not in all_revision_keys:
# The check for the left most parent only handles knit
# compressors, but this code only applies to knit and weave
# repositories anyway.
bytes = record.get_bytes_as('fulltext')
                yield FulltextContentFactory(record.key, wanted_parents,
                    record.sha1, bytes)
            else:
                adapted_record = AdapterFactory(record.key, wanted_parents,
                    record)
                yield adapted_record
self._reweave_step('adding inventories')
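        # A minimal sketch of the same stream-adaptation idea, using
        # hypothetical names (this is not the bzrlib API):
        #
        #   def change_parents(stream, get_parents):
        #       for record in stream:
        #           yield record.with_parents(get_parents(record.key))
        #
        # The real method above must additionally re-expand a record to a
        # fulltext whenever the leftmost parent changes, because knit deltas
        # are compressed against that parent.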
def _setup_steps(self, new_total):
"""Setup the markers we need to control the progress bar."""
mutter('found ghost %s', parent)
self._rev_graph[rev_id] = parents
if self._parents_are_inconsistent(rev_id, parents):
self.inconsistent_parents += 1
mutter('Inconsistent inventory parents: id {%s} '
'inventory claims %r, '
'available parents are %r, '
                   'unavailable parents are %r',
                   rev_id,
                   set(self.inventory.get_parent_map([rev_id])[rev_id]),
                   set(parents),
                   set(rev.parent_ids).difference(set(parents)))
def _parents_are_inconsistent(self, rev_id, parents):
        """Return True if the parents list of rev_id does not match the weave.

        This detects inconsistencies based on the self.thorough value:
        if thorough is on, the first parent value is checked as well as ghost
        differences.
        Otherwise only the ghost differences are evaluated.
        """
weave_parents = self.inventory.get_parent_map([rev_id])[rev_id]
weave_missing_old_ghosts = set(weave_parents) != set(parents)
first_parent_is_wrong = (
len(weave_parents) and len(parents) and
parents[0] != weave_parents[0])
        if self.thorough:
            return weave_missing_old_ghosts or first_parent_is_wrong
        else:
            return weave_missing_old_ghosts
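        # Worked example (assumed data): if the inventory weave records
        # parents ('b', 'a') for rev_id while the revision claims ('a', 'b'),
        # the sets are equal, so weave_missing_old_ghosts is False; with
        # thorough on, first_parent_is_wrong is True ('a' != 'b') and the
        # revision is reported inconsistent, while a non-thorough check
        # would pass it.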
def _check_garbage_inventories(self):
"""Check for garbage inventories which we cannot trust
def _load_indexes(self):
"""Load indexes for the reconciliation."""
self.transaction = self.repo.get_transaction()
self.pb.update('Reading indexes', 0, 2)
self.inventory = self.repo.inventories
self.pb.update('Reading indexes', 1, 2)
self.repo._check_for_inconsistent_revision_parents()
self.revisions = self.repo.revisions
self.pb.update('Reading indexes', 2, 2)
def _gc_inventory(self):
"""Remove inventories that are not referenced from the revision store."""
self.pb.update('Checking unused inventories', 0, 1)
self._check_garbage_inventories()
self.pb.update('Checking unused inventories', 1, 3)
if not self.garbage_inventories:
            ui.ui_factory.note('Inventory ok.')
            return
self.pb.update('Backing up inventory', 0, 0)
self.repo._backup_inventory()
ui.ui_factory.note('Backup Inventory created')
# asking for '' should never return a non-empty weave
new_inventories = self.repo._temp_inventories()
# we have topological order of revisions and non ghost parents ready.
graph = self.revisions.get_parent_map(self.revisions.keys())
revision_keys = topo_sort(graph)
revision_ids = [key[-1] for key in revision_keys]
self._setup_steps(len(revision_keys))
stream = self._change_inv_parents(
            self.inventory.get_record_stream(revision_keys, 'unordered', True),
            graph.__getitem__,
            set(revision_keys))
new_inventories.insert_record_stream(stream)
        # if this worked, the set of new_inventories.keys should equal
        # the revisions list
if not(set(new_inventories.keys()) == set(revision_keys)):
raise AssertionError()
self.pb.update('Writing weave')
self.repo._activate_new_inventory()
self.inventory = None
ui.ui_factory.note('Inventory regenerated.')
def _check_garbage_inventories(self):
        """Check for garbage inventories which we cannot trust

        We can't trust them because their pre-requisite file data may not
        be present - all we know is that their revision was not installed.
        """
        inventories = set(self.inventory.keys())
        revisions = set(self.revisions.keys())
        garbage = inventories.difference(revisions)
        self.garbage_inventories = len(garbage)
        for revision_key in garbage:
            mutter('Garbage inventory {%s} found.', revision_key[-1])
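        # Worked example (assumed data): inventories = {('a',), ('b',),
        # ('c',)} and revisions = {('a',), ('b',)} give garbage = {('c',)}:
        # an inventory whose revision was never installed, so its referenced
        # file texts cannot be trusted.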
def _fix_text_parents(self):
"""Fix bad versionedfile parent entries.
        parent lists, and replaces the versionedfile with a corrected version.
        """
transaction = self.repo.get_transaction()
versions = [key[-1] for key in self.revisions.keys()]
        mutter('Prepopulating revision text cache with %d revisions',
                len(versions))
vf_checker = self.repo._get_versioned_file_checker()
bad_parents, unused_versions = vf_checker.check_file_version_parents(
self.repo.texts, self.pb)
text_index = vf_checker.text_index
per_id_bad_parents = {}
for key in unused_versions:
# Ensure that every file with unused versions gets rewritten.
# NB: This is really not needed, reconcile != pack.
per_id_bad_parents[key[0]] = {}
# Generate per-knit/weave data.
        for key, details in bad_parents.iteritems():
            file_id = key[0]
            rev_id = key[1]
knit_parents = tuple([parent[-1] for parent in details[0]])
correct_parents = tuple([parent[-1] for parent in details[1]])
file_details = per_id_bad_parents.setdefault(file_id, {})
file_details[rev_id] = (knit_parents, correct_parents)
file_id_versions = {}
for text_key in text_index:
versions_list = file_id_versions.setdefault(text_key[0], [])
versions_list.append(text_key[1])
# Do the reconcile of individual weaves.
for num, file_id in enumerate(per_id_bad_parents):
self.pb.update('Fixing text parents', num,
len(per_id_bad_parents))
versions_with_bad_parents = per_id_bad_parents[file_id]
id_unused_versions = set(key[-1] for key in unused_versions
if key[0] == file_id)
if file_id in file_id_versions:
                file_versions = file_id_versions[file_id]
            else:
                # This id was present in the disk store but is not referenced
                # by any revision at all.
                file_versions = []
self._fix_text_parent(file_id, versions_with_bad_parents,
id_unused_versions, file_versions)
def _fix_text_parent(self, file_id, versions_with_bad_parents,
unused_versions, all_versions):
"""Fix bad versionedfile entries in a single versioned file."""
mutter('fixing text parent: %r (%d versions)', file_id,
len(versions_with_bad_parents))
mutter('(%d are unused)', len(unused_versions))
        new_file_id = 'temp:%s' % file_id
        new_parents = {}
        needed_keys = set()
for version in all_versions:
            if version in unused_versions:
                continue
elif version in versions_with_bad_parents:
                parents = versions_with_bad_parents[version][1]
            else:
pmap = self.repo.texts.get_parent_map([(file_id, version)])
parents = [key[-1] for key in pmap[(file_id, version)]]
new_parents[(new_file_id, version)] = [
(new_file_id, parent) for parent in parents]
needed_keys.add((file_id, version))
def fix_parents(stream):
for record in stream:
bytes = record.get_bytes_as('fulltext')
new_key = (new_file_id, record.key[-1])
parents = new_parents[new_key]
yield FulltextContentFactory(new_key, parents, record.sha1, bytes)
        stream = self.repo.texts.get_record_stream(needed_keys,
            'topological', True)
self.repo._remove_file_id(new_file_id)
self.repo.texts.insert_record_stream(fix_parents(stream))
        self.repo._remove_file_id(file_id)
        if len(new_parents):
            self.repo._move_file_id(new_file_id, file_id)
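        # Note (comment added for clarity): the scratch id 'temp:<file_id>'
        # lets the corrected records be written out completely before the
        # broken versionedfile is removed and the scratch id is renamed over
        # it. The rename is skipped when new_parents is empty: every version
        # was unused, so the versionedfile is simply deleted.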
class PackReconciler(RepoReconciler):
collection = self.repo._pack_collection
collection.ensure_loaded()
collection.lock_names()
self.add_cleanup(collection._unlock_names)
packs = collection.all_packs()
all_revisions = self.repo.all_revision_ids()
total_inventories = len(list(
collection.inventory_index.combined_index.iter_all_entries()))
if len(all_revisions):
new_pack = self.repo._reconcile_pack(collection, packs,
".reconcile", all_revisions, self.pb)
if new_pack is not None:
                self._discard_and_save(packs)
        else:
            # only make a new pack when there is data to copy.
            self._discard_and_save(packs)
self.garbage_inventories = total_inventories - len(list(
collection.inventory_index.combined_index.iter_all_entries()))
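        # Note (comment added for clarity): garbage is counted indirectly.
        # The reconcile pack only carries inventories reachable from the
        # surviving revisions, so the drop in combined-index entries, e.g.
        # 12 entries before minus 10 after = 2, is the number of garbage
        # inventories discarded.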
def _discard_and_save(self, packs):
"""Discard some packs from the repository.