@@ -224 +228 @@
         return self.index_name('text', name)

     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))
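The new flag matters most for the chk index: CHK pages are hit repeatedly while
inventories are being reconstructed, so evicting them from the page cache is
expensive. A rough sketch of what the flag changes, assuming index_class is
bzrlib.btree_index.BTreeGraphIndex (the transport, name and size below are made
up for illustration):

    from bzrlib.btree_index import BTreeGraphIndex
    # With unlimited_cache=True the index's internal page cache never
    # evicts, trading memory for fewer re-reads of the .cix file.
    chk_index = BTreeGraphIndex(index_transport, 'pack-1234.cix', 4096,
                                unlimited_cache=True)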
@@ -233 +241 @@
 class ExistingPack(Pack):
@@ -574 +589 @@
                                              flush_func=flush_func)
         self.add_callback = None

-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # any.
-        self.add_callback = None
@@ -597 +592 @@
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
@@ -606 +601 @@
         # expose it to the index map
         self.index_to_pack[index] = pack.access_tuple()
         # put it at the front of the linear index list
-        self.combined_index.insert_index(0, index)
+        self.combined_index.insert_index(0, index, pack.name)

     def add_writable_index(self, index, pack):
         """Add an index which is able to have data added to it.
@@ -632 +627 @@
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None

-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.

         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
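remove_index now deletes by position so that _indices and the new parallel
_index_names list stay aligned; list.remove() would only fix up one of the two.
A minimal standalone illustration of the invariant, with plain strings standing
in for the real index objects:

    indices = ['rev-a', 'rev-b', 'rev-c']
    index_names = ['pack-a', 'pack-b', 'pack-c']
    pos = indices.index('rev-b')
    del indices[pos]
    del index_names[pos]
    assert len(indices) == len(index_names) == 2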
@@ -1105 +1102 @@
         iterator is a tuple with:
         index, readv_vector, node_vector. readv_vector is a list ready to
         hand to the transport readv method, and node_vector is a list of
-        (key, eol_flag, references) for the the node retrieved by the
+        (key, eol_flag, references) for the node retrieved by the
         matching readv_vector.
         """
         # group by pack so we do one readv per pack
@@ -1403 +1400 @@
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
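Each CombinedGraphIndex is told about the combined indices of the other
aggregates, so a hit in one can hint its siblings about which pack name to
search first. A small sketch of the sibling computation, with strings standing
in for the CombinedGraphIndex objects:

    all_combined = ['rev', 'inv', 'txt', 'sig']
    for combined_idx in all_combined:
        siblings = set(all_combined).difference([combined_idx])
        # e.g. 'rev' is given the siblings set(['inv', 'txt', 'sig'])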
@@ -1413 +1420 @@
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)

     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
@@ -1530 +1539 @@
             self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
-        result = self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
@@ -1539 +1549 @@
     def _flush_new_pack(self):
@@ -1567 +1577 @@
         # determine which packs need changing
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if not hint or pack.name in hint:
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
                 pack_operations[-1][0] += pack.get_revision_count()
                 pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
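Note the semantic fix as well as the new flag: "not hint" treated an empty hint
list like no hint at all and packed everything, whereas "hint is None" lets an
empty list mean "pack nothing". A hedged usage sketch at the collection level
('collection' is a RepositoryPackCollection; the pack names are invented):

    # hint restricts the operation to the named packs; the new flag also
    # sweeps the obsolete_packs directory afterwards.
    collection.pack(hint=['a1b2c3d4', 'e5f6a7b8'], clean_obsolete_packs=True)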
@@ -1575 +1590 @@
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
@@ -1665 +1680 @@
         txt_index = self._make_index(name, '.tix')
         sig_index = self._make_index(name, '.six')
         if self.chk_index is not None:
-            chk_index = self._make_index(name, '.cix')
+            chk_index = self._make_index(name, '.cix', unlimited_cache=True)
         else:
             chk_index = None
         result = ExistingPack(self._pack_transport, name, rev_index,
@@ -1690 +1705 @@
         txt_index = self._make_index(name, '.tix', resume=True)
         sig_index = self._make_index(name, '.six', resume=True)
         if self.chk_index is not None:
-            chk_index = self._make_index(name, '.cix', resume=True)
+            chk_index = self._make_index(name, '.cix', resume=True,
+                                         unlimited_cache=True)
         else:
             chk_index = None
         result = self.resumed_pack_factory(name, rev_index, inv_index,
@@ -1726 +1742 @@
         return self._index_class(self.transport, 'pack-names', None
                 ).iter_all_entries()

-    def _make_index(self, name, suffix, resume=False):
+    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
@@ -1736 +1752 @@
         transport = self._index_transport
         index_size = self._names[name][size_offset]
-        return self._index_class(transport, index_name, index_size)
+        return self._index_class(transport, index_name, index_size,
+                                 unlimited_cache=unlimited_cache)
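_make_index simply threads the flag through to the index class, so any caller
that knows it is opening a chk index can ask for an unevictable page cache. A
hedged sketch of the call path (the pack name is invented):

    # Equivalent, under the hood, to:
    #   self._index_class(transport, 'pack-1234.cix', size,
    #                     unlimited_cache=True)
    chk_index = self._make_index('pack-1234', '.cix', unlimited_cache=True)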
@@ -1740 +1757 @@
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1769 +1786 @@
         :return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))
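The renames are now individually guarded, so a single missing or locked file no
longer aborts obsoleting the remaining packs; on shared repositories another
process may legitimately have moved the same file already. The pattern, reduced
to its core (transport and name are placeholders; errors and mutter are
bzrlib's, as imported in this file):

    try:
        transport.rename(name, '../obsolete_packs/' + name)
    except (errors.PathError, errors.TransportError), e:
        mutter("couldn't rename obsolete pack, skipping it:\n%s" % (e,))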
@@ -1784 +1810 @@
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
@@ -1811 +1837 @@
             self._remove_pack_indices(pack)
             self.packs.remove(pack)

-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
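The loop relies on Pack.index_definitions naming every index type, so one
generic body replaces five hand-written removals and automatically covers any
future index. The except clause is elided by the hunk; given the docstring, it
presumably implements ignore_missing roughly like this (a sketch of the elided
block inside the loop, not the verbatim source):

    try:
        aggregate_index.remove_index(pack_index)
    except KeyError:
        if ignore_missing:
            continue
        raise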
@@ -1823 +1857 @@
     def reset(self):
         """Clear all cached data."""
@@ -1875 +1910 @@
         disk_nodes.difference_update(deleted_nodes)
         disk_nodes.update(new_nodes)
-        return disk_nodes, deleted_nodes, new_nodes
+        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes

     def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
         """Given the correct set of pack files, update our saved info.
@@ -1932 +1967 @@
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
         :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
             for key, value in disk_nodes:
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        # TODO: We could add one more condition here. "if o.name not in
+        #       orig_disk_nodes and o != the new_pack we haven't written to
+        #       disk yet. However, the new pack object is not easily
+        #       accessible here (it would have to be passed through the
+        #       autopacking code, etc.)
+        obsolete_packs = [o for o in obsolete_packs
+                          if o.name not in already_obsolete]
+        self._obsolete_packs(obsolete_packs)
         return [new_node[0][0] for new_node in new_nodes]
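_diff_pack_names now returns a four-tuple; the extra orig_disk_nodes preserves
what pack-names contained before any in-memory changes were merged in, which is
what both the obsolete filtering above and reload_pack_names below need. A
hedged sketch of the contract as these hunks suggest it:

    # disk_nodes:      the merged set of names that will be written back out
    # deleted_nodes:   entries that disappeared relative to what we loaded
    # new_nodes:       entries that appeared relative to what we loaded
    # orig_disk_nodes: the pack-names contents exactly as read from disk
    (disk_nodes, deleted_nodes, new_nodes,
     orig_disk_nodes) = self._diff_pack_names()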
@@ -1957 +2007 @@
     def reload_pack_names(self):
@@ -1974 +2024 @@
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at the start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
@@ -1989 +2043 @@
             raise errors.RetryAutopack(self.repo, False, sys.exc_info())
@@ -1991 +2045 @@
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
+        """
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
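With preserve, a caller that has just obsoleted packs itself (as
_save_pack_names now does) can keep those files from being deleted while still
sweeping older leftovers, and the return value reports what was already
present. A hedged usage sketch ('collection' is a RepositoryPackCollection; the
pack name is invented):

    already = collection._clear_obsolete_packs(preserve=set(['a1b2c3d4']))
    # 'already' lists the pack identifiers found in obsolete_packs/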
@@ -2001 +2068 @@
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
@@ -2029 +2096 @@
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone.  If they're not there we shouldn't fail in this
-                # case. -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone. But if they're not there we shouldn't fail in this
+            # case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                self._remove_pack_indices(resumed_pack)
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]
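cleanup.OperationWithCleanups runs the wrapped callable and then every
registered cleanup, logging a secondary failure instead of letting it mask the
original exception, which is exactly the weakness of the nested try/finally it
replaces. A minimal sketch of the pattern used above (new_pack and collection
are stand-ins):

    from bzrlib import cleanup

    operation = cleanup.OperationWithCleanups(new_pack.abort)
    operation.add_cleanup(setattr, collection, '_new_pack', None)
    operation.run_simple()  # abort() runs first, then each cleanup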
@@ -2052 +2116 @@
     def _remove_resumed_pack_indices(self):
         for resumed_pack in self._resumed_packs:
             self._remove_pack_indices(resumed_pack)
         del self._resumed_packs[:]
@@ -2056 +2121 @@
+    def _check_new_inventories(self):
+        """Detect missing inventories in this write group.
+
+        :returns: list of strs, summarising any problems found. If the list is
+            empty no problems were found.
+        """
+        # The base implementation does no checks. GCRepositoryPackCollection
+        # overrides this.
+        return []
@@ -2057 +2131 @@
     def _commit_write_group(self):
         all_missing = set()
         for prefix, versioned_file in (
@@ -2068 +2142 @@
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+        if problems:
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        should_autopack = False
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and ready to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            should_autopack = True
+            any_new_content = True
         else:
             self._new_pack.abort()
             self._new_pack = None
@@ -2200 +2282 @@
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
@@ -2203 +2285 @@
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-                return
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                    " 'bzr upgrade --1.6.1-rich-root'"
-                    % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)
@@ -2214 +2290 @@
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
@@ -2218 +2293 @@
-    def _find_inconsistent_revision_parents(self):
-        """Find revisions with incorrectly cached parents.
-
-        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
-            parents-in-revision).
-        """
-        if not self.is_locked():
-            raise errors.ObjectNotLocked(self)
-        pb = ui.ui_factory.nested_progress_bar()
-        revision_nodes = self._pack_collection.revision_index \
-            .combined_index.iter_all_entries()
-        index_positions = []
-        # Get the cached index values for all revisions, and also the
-        # location in each index of the revision text so we can perform
-        for index, key, value, refs in revision_nodes:
-            node = (index, key, value, refs)
-            index_memo = self.revisions._index._node_to_position(node)
-            if index_memo[0] != index:
-                raise AssertionError('%r != %r' % (index_memo[0], index))
-            index_positions.append((index_memo, key[0],
-                tuple(parent[0] for parent in refs[0])))
-            pb.update("Reading revision index", 0, 0)
-        index_positions.sort()
-        pb.update("Checking cached revision graph", 0,
-                  len(index_positions))
-        for offset in xrange(0, len(index_positions), 1000):
-            pb.update("Checking cached revision graph", offset)
-            to_query = index_positions[offset:offset + batch_size]
-            rev_ids = [item[1] for item in to_query]
-            revs = self.get_revisions(rev_ids)
-            for revision, item in zip(revs, to_query):
-                index_parents = item[2]
-                rev_parents = tuple(revision.parent_ids)
-                if index_parents != rev_parents:
-                    result.append((revision.revision_id, index_parents,
@@ -2264 +2294 @@
     def _get_source(self, to_format):
         if to_format.network_name() == self._format.network_name():
             return KnitPackStreamSource(self, to_format)
@@ -2278 +2308 @@
         self._pack_collection._start_write_group()
@@ -2280 +2310 @@
     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()
+        return hint
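The commit path now captures the pack collection's result before clearing the
key-dependency cache, instead of returning it directly. A hedged sketch of how
such a result is typically consumed, assuming it is a list of pack names usable
as a pack(hint=...) argument, as the pack() hunk later in this diff suggests:

    hint = repo._pack_collection._commit_write_group()
    if hint:
        repo.pack(hint=hint)  # repack only the packs named in the hint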
@@ -2284 +2315 @@
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None
@@ -2318 +2353 @@
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
         self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)
@@ -2326 +2365 @@
     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
                 repo.lock_read()
         self._refresh_data()
+        return LogicalLockResult(self.unlock)
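The note() calls fire only when the 'relock' debug flag is set and the previous
lock was of the same kind, making redundant lock/unlock churn visible. A hedged
sketch of enabling the tracing programmatically (the flag is normally set by
running bzr with -Drelock):

    from bzrlib import debug

    debug.debug_flags.add('relock')
    repo.lock_read(); repo.unlock()
    repo.lock_read()   # now logged: '<repo> was read locked again'
    repo.unlock()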
@@ -2337 +2384 @@
     def leave_lock_in_place(self):
         # not supported - raise an error
@@ -2343 +2390 @@
         raise NotImplementedError(self.dont_leave_lock_in_place)
@@ -2345 +2392 @@
     @needs_write_lock
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.

         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack(hint=hint)
+        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
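pack() is decorated with @needs_write_lock, so callers must already hold a
write lock on the repository; the new flag is simply forwarded to the pack
collection. A hedged usage sketch:

    repo.lock_write()
    try:
        repo.pack(clean_obsolete_packs=True)
    finally:
        repo.unlock()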
@@ -2354 +2401 @@
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
@@ -2510 +2558 @@
         utf8_files = [('format', self.get_format_string())]

         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository
@@ -2515 +2565 @@
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
@@ -2600 +2648 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
@@ -2646 +2686 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar pack repository format 1 with rich root"
@@ -2732 +2764 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"
@@ -2783 +2810 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"
@@ -2863 +2882 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"
@@ -2909 +2924 @@
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "