-# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
+# Copyright (C) 2007-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
...
     InMemoryGraphIndex,
     )
+from bzrlib.lock import LogicalLockResult
 from bzrlib.repofmt.knitrepo import KnitRepository
 from bzrlib.repository import (
...
     MetaDirRepositoryFormat,
...
+    RepositoryWriteLockResult,
...
             flush_func=flush_func)
         self.add_callback = None
 
-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
...
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
...
-        self.add_callback = None
 
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
...
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)
 
     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
...
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
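
Why the replacement deletes by position: CombinedGraphIndex now keeps
_index_names parallel to _indices, so both lists must drop the entry at the
same offset or they fall out of step. A minimal sketch, with plain lists
standing in for the real index objects:

indices = ['rev-ix-a', 'rev-ix-b', 'rev-ix-c']   # stand-ins for index objects
names = ['pack-a', 'pack-b', 'pack-c']           # the parallel name list

def remove_index(index):
    pos = indices.index(index)   # find the shared position first
    del indices[pos]             # then delete both entries at that position
    del names[pos]

remove_index('rev-ix-b')
assert indices == ['rev-ix-a', 'rev-ix-c']
assert names == ['pack-a', 'pack-c']
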
...
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
 
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
...
                 self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
...
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
...
 
     def _flush_new_pack(self):
...
         """Is the collection already packed?"""
         return not (self.repo._format.pack_compresses or (len(self._names) > 1))
 
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
...
             pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
 
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
...
         :param return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
...
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))
 
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
...
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
...
+                aggregate_index.remove_index(pack_index)
 
     def reset(self):
         """Clear all cached data."""
...
         disk_nodes = set()
         for index, key, value in self._iter_disk_pack_index():
             disk_nodes.add((key, value))
+        orig_disk_nodes = set(disk_nodes)
 
         # do a two-way diff against our original content
         current_nodes = set()
...
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
 
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
 
     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
...
             added.append(name)
         return removed, added, modified
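
A simplified sketch of the two-way diff that _diff_pack_names performs; the
real method seeds disk_nodes from what is actually on disk, which can also
contain changes other processes made, while orig_disk_nodes preserves that
baseline for callers:

orig_disk_nodes = set([('pack-a',), ('pack-b',)])   # names read from disk
current_nodes = set([('pack-b',), ('pack-c',)])     # our in-memory view
deleted_nodes = orig_disk_nodes - current_nodes     # dropped locally
new_nodes = current_nodes - orig_disk_nodes         # added locally
disk_nodes = set(orig_disk_nodes)
disk_nodes.difference_update(deleted_nodes)
disk_nodes.update(new_nodes)
assert disk_nodes == set([('pack-b',), ('pack-c',)])
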
 
-    def _save_pack_names(self, clear_obsolete_packs=False):
+    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
         """Save the list of packs.
 
         This will take out the mutex around the pack names list for the
...
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
...
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
...
+                to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        if obsolete_packs:
+            # TODO: We could add one more condition here. "if o.name not in
+            #       orig_disk_nodes and o != the new_pack we haven't written to
+            #       disk yet. However, the new pack object is not easily
+            #       accessible here (it would have to be passed through the
+            #       autopacking code, etc.)
+            obsolete_packs = [o for o in obsolete_packs
+                              if o.name not in already_obsolete]
+            self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]
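
The already_obsolete bookkeeping keeps _obsolete_packs from touching files
that _clear_obsolete_packs has just moved aside. Reduced to plain names:

already_obsolete = ['pack-a']                 # swept from obsolete_packs
obsolete_packs = ['pack-a', 'pack-b']         # everything we retired
remaining = [name for name in obsolete_packs if name not in already_obsolete]
assert remaining == ['pack-b']                # only pack-b still needs moving
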
 
     def reload_pack_names(self):
...
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at the start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
...
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
 
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
...
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
...
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
...
+            if name in preserve:
...
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
...
 
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
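
A sketch of the preserve filtering _clear_obsolete_packs gains above, with
os.path.splitext standing in for bzrlib's osutils.splitext:

import os

def sweep(filenames, preserve):
    kept, deleted = [], []
    for filename in filenames:
        name, ext = os.path.splitext(filename)
        # files whose base name is preserved survive the sweep
        (kept if name in preserve else deleted).append(filename)
    return kept, deleted

kept, deleted = sweep(['a.pack', 'a.rix', 'b.pack'], set(['a']))
assert kept == ['a.pack', 'a.rix'] and deleted == ['b.pack']
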
...
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone. If they're not there we shouldn't fail in this
-                # case. -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone. But if they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                self._remove_pack_indices(resumed_pack)
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]
 
     def _remove_resumed_pack_indices(self):
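
A rough stand-in for the cleanup.OperationWithCleanups pattern now used in
_abort_write_group: run the primary operation, then run every registered
cleanup even if the operation raised (the real class has richer error
handling than this sketch):

def run_with_cleanups(func, cleanups):
    try:
        return func()
    finally:
        for cleanup_func, args in cleanups:
            cleanup_func(*args)

log = []
run_with_cleanups(lambda: log.append('abort'),
                  [(log.append, ('cleanup',))])
assert log == ['abort', 'cleanup']
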
...
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
 
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-                return
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                " 'bzr upgrade --1.6.1-rich-root'"
-                % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)
 
     def _abort_write_group(self):
         self.revisions._index._key_dependencies.clear()
...
         return self._write_lock_count
 
     def lock_write(self, token=None):
+        """Lock the repository for writes.
+
+        :return: A bzrlib.repository.RepositoryWriteLockResult.
+        """
         locked = self.is_locked()
         if not self._write_lock_count and locked:
             raise errors.ReadOnlyError(self)
...
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)
 
     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
...
         raise NotImplementedError(self.dont_leave_lock_in_place)
 
     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
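
A hypothetical usage sketch of the extended API (assumes a Python 2
environment with bzrlib installed; the repository path is illustrative):

from bzrlib.repository import Repository

repo = Repository.open('path/to/repo')
lock_result = repo.lock_write()   # now a RepositoryWriteLockResult
try:
    # repack everything and also sweep the obsolete_packs directory
    repo.pack(clean_obsolete_packs=True)
finally:
    lock_result.unlock()
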
...
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository
 
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
...
     repository_class = KnitPackRepository
     _commit_builder_class = PackRootCommitBuilder
     rich_root_data = True
...
     supports_tree_reference = True
     supports_external_lookups = True
     # What index classes to use