         return self.index_name('text', name)
 
     def _replace_index_with_readonly(self, index_type):
+        unlimited_cache = False
+        if index_type == 'chk':
+            unlimited_cache = True
         setattr(self, index_type + '_index',
             self.index_class(self.index_transport,
                 self.index_name(index_type, self.name),
-                self.index_sizes[self.index_offset(index_type)]))
+                self.index_sizes[self.index_offset(index_type)],
+                unlimited_cache=unlimited_cache))
 
 
 class ExistingPack(Pack):
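
Reviewer sketch, not part of the diff: the `unlimited_cache` flag matters because chk nodes are addressed by content hash, so reads have no locality and a bounded page cache thrashes. A standalone sketch of the idea (hypothetical NodeCache, not bzrlib's index cache):

class NodeCache(object):
    def __init__(self, max_entries=None):
        # max_entries=None models unlimited_cache=True: never evict.
        self.max_entries = max_entries
        self._entries = {}

    def add(self, key, node):
        if self.max_entries is not None and len(self._entries) >= self.max_entries:
            # Crude eviction stands in for an LRU policy.
            self._entries.pop(next(iter(self._entries)))
        self._entries[key] = node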
[...]
             flush_func=flush_func)
         self.add_callback = None
 
-    def replace_indices(self, index_to_pack, indices):
-        """Replace the current mappings with fresh ones.
-
-        This should probably not be used eventually, rather incremental add and
-        removal of indices. It has been added during refactoring of existing
-        code.
-
-        :param index_to_pack: A mapping from index objects to
-            (transport, name) tuples for the pack file data.
-        :param indices: A list of indices.
-        """
-        # refresh the revision pack map dict without replacing the instance.
-        self.index_to_pack.clear()
-        self.index_to_pack.update(index_to_pack)
-        # XXX: API break - clearly a 'replace' method would be good?
-        self.combined_index._indices[:] = indices
-        # the current add nodes callback for the current writable index if
-        # there is one.
-        self.add_callback = None
-
     def add_index(self, index, pack):
         """Add index to the aggregate, which is an index for Pack pack.
[...]
         self.data_access.set_writer(None, None, (None, None))
         self.index_to_pack.clear()
         del self.combined_index._indices[:]
+        del self.combined_index._index_names[:]
         self.add_callback = None
 
-    def remove_index(self, index, pack):
+    def remove_index(self, index):
         """Remove index from the indices used to answer queries.
 
         :param index: An index from the pack parameter.
-        :param pack: A Pack instance.
         """
         del self.index_to_pack[index]
-        self.combined_index._indices.remove(index)
+        pos = self.combined_index._indices.index(index)
+        del self.combined_index._indices[pos]
+        del self.combined_index._index_names[pos]
         if (self.add_callback is not None and
             getattr(index, 'add_nodes', None) == self.add_callback):
             self.add_callback = None
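
Reviewer sketch, not part of the diff: remove_index now deletes by position because _indices and _index_names are parallel lists that must stay aligned. A minimal standalone model of that invariant (illustrative names only):

def remove_parallel(indices, names, index):
    pos = indices.index(index)  # raises ValueError if index is absent
    del indices[pos]
    del names[pos]

indices, names = [10, 20, 30], ['a', 'b', 'c']
remove_parallel(indices, names, 20)
assert indices == [10, 30] and names == ['a', 'c']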
[...]
         self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
         self.text_index = AggregateIndex(self.reload_pack_names, flush)
         self.signature_index = AggregateIndex(self.reload_pack_names, flush)
+        all_indices = [self.revision_index, self.inventory_index,
+                self.text_index, self.signature_index]
         if use_chk_index:
             self.chk_index = AggregateIndex(self.reload_pack_names, flush)
+            all_indices.append(self.chk_index)
         else:
             # used to determine if we're using a chk_index elsewhere.
             self.chk_index = None
+        # Tell all the CombinedGraphIndex objects about each other, so they can
+        # share hints about which pack names to search first.
+        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
+        for combined_idx in all_combined:
+            combined_idx.set_sibling_indices(
+                set(all_combined).difference([combined_idx]))
         # resumed packs
         self._resumed_packs = []
 
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.repo)
 
     def add_pack_to_memory(self, pack):
         """Make a Pack object available to the repository to satisfy queries.
[...]
             'containing %d revisions. Packing %d files into %d affecting %d'
             ' revisions', self, total_packs, total_revisions, num_old_packs,
             num_new_packs, num_revs_affected)
-        self._execute_pack_operations(pack_operations,
+        result = self._execute_pack_operations(pack_operations,
                                       reload_func=self._restart_autopack)
         mutter('Auto-packing repository %s completed', self)
[...]
     def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                  reload_func=None):
[...]
             self._remove_pack_from_memory(pack)
         # record the newly available packs and stop advertising the old
         # packs
-        self._save_pack_names(clear_obsolete_packs=True)
-        # Move the old packs out of the way now they are no longer referenced.
-        for revision_count, packs in pack_operations:
-            self._obsolete_packs(packs)
+        to_be_obsoleted = []
+        for _, packs in pack_operations:
+            to_be_obsoleted.extend(packs)
+        result = self._save_pack_names(clear_obsolete_packs=True,
+                                       obsolete_packs=to_be_obsoleted)
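
Reviewer sketch, not part of the diff: the reordering here is the substantive change — the new pack-names file is written before the superseded packs are moved aside, so a concurrent reader never sees names for packs that have already been renamed. An illustrative restatement with hypothetical helpers:

def finish_pack_operations(pack_operations, save_pack_names):
    to_be_obsoleted = []
    for _, packs in pack_operations:
        to_be_obsoleted.extend(packs)
    # save_pack_names is expected to both write the new names and report
    # which packs were already obsolete.
    return save_pack_names(clear_obsolete_packs=True,
                           obsolete_packs=to_be_obsoleted)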
[...]
     def _flush_new_pack(self):
         if self._new_pack is not None:
[...]
     def _already_packed(self):
         """Is the collection already packed?"""
-        return len(self._names) < 2
+        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
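
Reviewer sketch, not part of the diff: a hedged restatement of the new predicate — a format whose packer can recompress (pack_compresses) is never "already packed", since an explicit pack may still shrink data; otherwise a single pack leaves nothing to combine.

def already_packed(pack_compresses, pack_count):
    return not (pack_compresses or pack_count > 1)

assert already_packed(False, 1)
assert not already_packed(True, 1)    # recompression could still help
assert not already_packed(False, 2)   # two packs can still be combined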
[...]
-    def pack(self):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Pack the pack collection totally."""
         self.ensure_loaded()
         total_packs = len(self._names)
         if self._already_packed():
-            # This is arguably wrong because we might not be optimal, but for
-            # now lets leave it in. (e.g. reconcile -> one pack. But not
[...]
             return
         total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
-            'containing %d revisions into 1 packs.', self, total_packs,
-            total_revisions)
+            'containing %d revisions with hint %r.', self, total_packs,
+            total_revisions, hint)
         # determine which packs need changing
-        pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            pack_operations[-1][0] += pack.get_revision_count()
-            pack_operations[-1][1].append(pack)
+            if hint is None or pack.name in hint:
+                # Either no hint was provided (so we are packing everything),
+                # or this pack was included in the hint.
+                pack_operations[-1][0] += pack.get_revision_count()
+                pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
+        if clean_obsolete_packs:
+            self._clear_obsolete_packs()
 
     def plan_autopack_combinations(self, existing_packs, pack_distribution):
         """Plan a pack operation.
[...]
             txt_index = self._make_index(name, '.tix', resume=True)
             sig_index = self._make_index(name, '.six', resume=True)
             if self.chk_index is not None:
-                chk_index = self._make_index(name, '.cix', resume=True)
+                chk_index = self._make_index(name, '.cix', resume=True,
+                                             unlimited_cache=True)
             else:
                 chk_index = None
             result = self.resumed_pack_factory(name, rev_index, inv_index,
[...]
         :return: None.
         """
         for pack in packs:
-            pack.pack_transport.rename(pack.file_name(),
-                '../obsolete_packs/' + pack.file_name())
+            try:
+                pack.pack_transport.rename(pack.file_name(),
+                    '../obsolete_packs/' + pack.file_name())
+            except (errors.PathError, errors.TransportError), e:
+                # TODO: Should these be warnings or mutters?
+                mutter("couldn't rename obsolete pack, skipping it:\n%s"
+                       % (e,))
             # TODO: Probably needs to know all possible indices for this pack
             # - or maybe list the directory and move all indices matching this
             # name whether we recognize it or not?
[...]
             if self.chk_index is not None:
                 suffixes.append('.cix')
             for suffix in suffixes:
-                self._index_transport.rename(pack.name + suffix,
-                    '../obsolete_packs/' + pack.name + suffix)
+                try:
+                    self._index_transport.rename(pack.name + suffix,
+                        '../obsolete_packs/' + pack.name + suffix)
+                except (errors.PathError, errors.TransportError), e:
+                    mutter("couldn't rename obsolete index, skipping it:\n%s"
+                           % (e,))
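
Reviewer sketch, not part of the diff: moving obsolete files is best-effort — a racing process may have moved or deleted them first, so failures are logged and skipped. The same pattern on a plain filesystem (os.rename standing in for the transport API):

import os

def rename_quietly(src, dst, log):
    try:
        os.rename(src, dst)
    except OSError, e:
        log("couldn't rename obsolete file, skipping it:\n%s" % (e,))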
[...]
     def pack_distribution(self, total_revisions):
         """Generate a list of the number of revisions to put in each pack.
[...]
         self._remove_pack_indices(pack)
         self.packs.remove(pack)
 
-    def _remove_pack_indices(self, pack):
-        """Remove the indices for pack from the aggregated indices."""
-        self.revision_index.remove_index(pack.revision_index, pack)
-        self.inventory_index.remove_index(pack.inventory_index, pack)
-        self.text_index.remove_index(pack.text_index, pack)
-        self.signature_index.remove_index(pack.signature_index, pack)
-        if self.chk_index is not None:
-            self.chk_index.remove_index(pack.chk_index, pack)
+    def _remove_pack_indices(self, pack, ignore_missing=False):
+        """Remove the indices for pack from the aggregated indices.
+
+        :param ignore_missing: Suppress KeyErrors from calling remove_index.
+        """
+        for index_type in Pack.index_definitions.keys():
+            attr_name = index_type + '_index'
+            aggregate_index = getattr(self, attr_name)
+            if aggregate_index is not None:
+                pack_index = getattr(pack, attr_name)
+                try:
+                    aggregate_index.remove_index(pack_index)
+                except KeyError:
+                    if ignore_missing:
+                        continue
+                    raise
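
Reviewer sketch, not part of the diff: the rewrite trades five hand-written calls for one loop over the declared index types, which also gives a single place to tolerate missing entries. A standalone sketch (the index_definitions table is assumed, modelled here as a tuple):

INDEX_TYPES = ('revision', 'inventory', 'text', 'signature', 'chk')

def remove_pack_indices(collection, pack, ignore_missing=False):
    for index_type in INDEX_TYPES:
        attr_name = index_type + '_index'
        aggregate_index = getattr(collection, attr_name)
        if aggregate_index is None:   # e.g. no chk index in this format
            continue
        try:
            aggregate_index.remove_index(getattr(pack, attr_name))
        except KeyError:
            if not ignore_missing:
                raise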
[...]
     def reset(self):
         """Clear all cached data."""
[...]
         :param clear_obsolete_packs: If True, clear out the contents of the
             obsolete_packs directory.
+        :param obsolete_packs: Packs that are obsolete once the new pack-names
+            file has been written.
+        :return: A list of the names saved that were not previously on disk.
         """
+        already_obsolete = []
         self.lock_names()
         try:
             builder = self._index_builder_class()
-            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
+            (disk_nodes, deleted_nodes, new_nodes,
+             orig_disk_nodes) = self._diff_pack_names()
             # TODO: handle same-name, index-size-changes here -
             # e.g. use the value from disk, not ours, *unless* we're the one
             # changing it.
             for key, value in disk_nodes:
                 builder.add_node(key, value)
             self.transport.put_file('pack-names', builder.finish(),
                 mode=self.repo.bzrdir._get_file_mode())
-            # move the baseline forward
             self._packs_at_load = disk_nodes
             if clear_obsolete_packs:
-                self._clear_obsolete_packs()
+                to_preserve = set([o.name for o in obsolete_packs])
+                already_obsolete = self._clear_obsolete_packs(to_preserve)
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
         self._syncronize_pack_names_from_disk_nodes(disk_nodes)
+        # TODO: We could add one more condition here. "if o.name not in
+        #       orig_disk_nodes and o != the new_pack we haven't written to
+        #       disk yet. However, the new pack object is not easily
+        #       accessible here (it would have to be passed through the
+        #       autopacking code, etc.)
+        obsolete_packs = [o for o in obsolete_packs
+                          if o.name not in already_obsolete]
+        self._obsolete_packs(obsolete_packs)
+        return [new_node[0][0] for new_node in new_nodes]
[...]
     def reload_pack_names(self):
         """Sync our pack listing with what is present in the repository.
[...]
         # out the new value.
-        disk_nodes, _, _ = self._diff_pack_names()
-        self._packs_at_load = disk_nodes
+        (disk_nodes, deleted_nodes, new_nodes,
+         orig_disk_nodes) = self._diff_pack_names()
+        # _packs_at_load is meant to be the explicit list of names in
+        # 'pack-names' at the start. As such, it should not contain any
+        # pending names that haven't been written out yet.
+        self._packs_at_load = orig_disk_nodes
         (removed, added,
          modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
         if removed or added or modified:
[...]
         raise errors.RetryAutopack(self.repo, False, sys.exc_info())
[...]
-    def _clear_obsolete_packs(self):
+    def _clear_obsolete_packs(self, preserve=None):
         """Delete everything from the obsolete-packs directory.
+
+        :return: A list of pack identifiers (the filename without '.pack') that
+            were found in obsolete_packs.
         """
+        found = []
         obsolete_pack_transport = self.transport.clone('obsolete_packs')
+        if preserve is None:
+            preserve = set()
         for filename in obsolete_pack_transport.list_dir('.'):
+            name, ext = osutils.splitext(filename)
+            if ext == '.pack':
+                found.append(name)
+            if name in preserve:
+                continue
             try:
                 obsolete_pack_transport.delete(filename)
             except (errors.PathError, errors.TransportError), e:
-                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))
+                warning("couldn't delete obsolete pack, skipping it:\n%s"
+                        % (e,))
+        return found
[...]
     def _start_write_group(self):
         # Do not permit preparation for writing if we're not in a 'write lock'.
[...]
         # FIXME: just drop the transient index.
         # forget what names there are
         if self._new_pack is not None:
-            try:
-                self._new_pack.abort()
-            finally:
-                # XXX: If we aborted while in the middle of finishing the write
-                # group, _remove_pack_indices can fail because the indexes are
-                # already gone. If they're not there we shouldn't fail in this
-                # case. -- mbp 20081113
-                self._remove_pack_indices(self._new_pack)
-                self._new_pack = None
+            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
+            operation.add_cleanup(setattr, self, '_new_pack', None)
+            # If we aborted while in the middle of finishing the write
+            # group, _remove_pack_indices could fail because the indexes are
+            # already gone.  But if they're not there we shouldn't fail in
+            # this case, so we pass ignore_missing=True.
+            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
+                ignore_missing=True)
+            operation.run_simple()
         for resumed_pack in self._resumed_packs:
-            try:
-                resumed_pack.abort()
-            finally:
-                # See comment in previous finally block.
-                self._remove_pack_indices(resumed_pack)
+            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
+            # See comment in previous finally block.
+            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
+                ignore_missing=True)
+            operation.run_simple()
         del self._resumed_packs[:]
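
Reviewer sketch, not part of the diff: a toy model of the cleanup pattern adopted here; bzrlib's cleanup.OperationWithCleanups has richer error chaining, this only shows the shape — run the operation, then always run the registered cleanups instead of nesting try/finally blocks.

def run_with_cleanups(operation, cleanups):
    try:
        return operation()
    finally:
        for func, args, kwargs in cleanups:
            func(*args, **kwargs)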
[...]
     def _remove_resumed_pack_indices(self):
[...]
             raise errors.BzrCheckError(
                 "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
+        problems = self._check_new_inventories()
+        if problems:
+            problems_summary = '\n'.join(problems)
+            raise errors.BzrCheckError(
+                "Cannot add revision(s) to repository: " + problems_summary)
         self._remove_pack_indices(self._new_pack)
-        should_autopack = False
+        any_new_content = False
         if self._new_pack.data_inserted():
             # get all the data to disk and read to use
             self._new_pack.finish()
             self.allocate(self._new_pack)
             self._new_pack = None
-            should_autopack = True
+            any_new_content = True
         else:
             self._new_pack.abort()
             self._new_pack = None
[...]
         self._reconcile_fixes_text_parents = True
         self._reconcile_backsup_inventory = False
[...]
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         # This class isn't deprecated, but one sub-format is
         if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
-            from bzrlib import repository
-            if repository._deprecation_warning_done:
-                return
-            repository._deprecation_warning_done = True
-            warning("Format %s for %s is deprecated - please use"
-                    " 'bzr upgrade --1.6.1-rich-root'"
-                    % (self._format, self.bzrdir.transport.base))
+            super(KnitPackRepository, self)._warn_if_deprecated(branch)
[...]
     def _abort_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._pack_collection._abort_write_group()
 
-    def _find_inconsistent_revision_parents(self):
-        """Find revisions with incorrectly cached parents.
-
-        :returns: an iterator yielding tuples of (revison-id, parents-in-index,
-            parents-in-revision).
-        """
-        if not self.is_locked():
-            raise errors.ObjectNotLocked(self)
-        pb = ui.ui_factory.nested_progress_bar()
[...]
-        revision_nodes = self._pack_collection.revision_index \
-            .combined_index.iter_all_entries()
-        index_positions = []
-        # Get the cached index values for all revisions, and also the
-        # location in each index of the revision text so we can perform
[...]
-        for index, key, value, refs in revision_nodes:
-            node = (index, key, value, refs)
-            index_memo = self.revisions._index._node_to_position(node)
-            if index_memo[0] != index:
-                raise AssertionError('%r != %r' % (index_memo[0], index))
-            index_positions.append((index_memo, key[0],
-                tuple(parent[0] for parent in refs[0])))
-            pb.update("Reading revision index", 0, 0)
-        index_positions.sort()
[...]
-        pb.update("Checking cached revision graph", 0,
-                  len(index_positions))
-        for offset in xrange(0, len(index_positions), 1000):
-            pb.update("Checking cached revision graph", offset)
-            to_query = index_positions[offset:offset + batch_size]
[...]
-            rev_ids = [item[1] for item in to_query]
-            revs = self.get_revisions(rev_ids)
-            for revision, item in zip(revs, to_query):
-                index_parents = item[2]
-                rev_parents = tuple(revision.parent_ids)
-                if index_parents != rev_parents:
-                    result.append((revision.revision_id, index_parents,
[...]
+    def _get_source(self, to_format):
+        if to_format.network_name() == self._format.network_name():
+            return KnitPackStreamSource(self, to_format)
+        return super(KnitPackRepository, self)._get_source(to_format)
 
     def _make_parents_provider(self):
         return graph.CachingParentsProvider(self)
[...]
         self._pack_collection._start_write_group()
 
     def _commit_write_group(self):
-        self.revisions._index._key_dependencies.refs.clear()
-        return self._pack_collection._commit_write_group()
+        hint = self._pack_collection._commit_write_group()
+        self.revisions._index._key_dependencies.clear()
+        return hint
 
     def suspend_write_group(self):
         # XXX check self._write_group is self.get_transaction()?
         tokens = self._pack_collection._suspend_write_group()
-        self.revisions._index._key_dependencies.refs.clear()
+        self.revisions._index._key_dependencies.clear()
         self._write_group = None
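
Reviewer sketch, not part of the diff: a hedged restatement of the ordering fix — the autopack hint must be captured before the key-dependency cache is cleared, and is returned so the caller can run a targeted pack(hint=...).

def commit_write_group(pack_collection, key_dependencies):
    hint = pack_collection._commit_write_group()
    key_dependencies.clear()
    return hint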
[...]
         if self._write_lock_count == 1:
             self._transaction = transactions.WriteTransaction()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
+                note('%r was write locked again', self)
+            self._prev_lock = 'w'
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
+        return RepositoryWriteLockResult(self.unlock, None)
 
     def lock_read(self):
+        """Lock the repository for reads.
+
+        :return: A bzrlib.lock.LogicalLockResult.
+        """
         locked = self.is_locked()
         if self._write_lock_count:
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
         if not locked:
+            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
+                note('%r was read locked again', self)
+            self._prev_lock = 'r'
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
+        return LogicalLockResult(self.unlock)
 
     def leave_lock_in_place(self):
         # not supported - raise an error
[...]
         raise NotImplementedError(self.dont_leave_lock_in_place)
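
Reviewer sketch, not part of the diff: the relock diagnostic remembers the kind of the previous lock and reports only a repeated acquisition of the same kind; in bzr the check is gated on the (assumed) 'relock' debug flag, enabled with -Drelock. A standalone model:

class LockTracker(object):
    def __init__(self, note):
        self._note = note
        self._prev_lock = None

    def record(self, kind):  # kind is 'r' or 'w'
        if self._prev_lock == kind:
            self._note('lock of kind %r taken again' % kind)
        self._prev_lock = kind

events = []
tracker = LockTracker(events.append)
tracker.record('r'); tracker.record('r'); tracker.record('w')
assert len(events) == 1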
[...]
     @needs_write_lock
-    def pack(self):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.
 
         This will pack all the data to a single pack. In future it may
         recompress deltas or do other such expensive operations.
         """
-        self._pack_collection.pack()
+        self._pack_collection.pack(hint=hint,
+            clean_obsolete_packs=clean_obsolete_packs)
 
     @needs_write_lock
     def reconcile(self, other=None, thorough=False):
[...]
+class KnitPackStreamSource(StreamSource):
+    """A StreamSource used to transfer data between same-format KnitPack repos.
+
+    This source assumes:
+        1) Same serialization format for all objects
+        2) Same root information
+        3) XML format inventories
+        4) Atomic inserts (so we can stream inventory texts before text
+           content)
+    """
+
+    def __init__(self, from_repository, to_format):
+        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
+        self._text_keys = None
+        self._text_fetch_order = 'unordered'
+
+    def _get_filtered_inv_stream(self, revision_ids):
+        from_repo = self.from_repository
+        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
+        parent_keys = [(p,) for p in parent_ids]
+        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
+        parent_text_keys = set(find_text_keys(
+            from_repo._inventory_xml_lines_for_keys(parent_keys)))
+        content_text_keys = set()
+        knit = KnitVersionedFiles(None, None)
+        factory = KnitPlainFactory()
+        def find_text_keys_from_content(record):
+            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
+                raise ValueError("Unknown content storage kind for"
+                    " inventory text: %s" % (record.storage_kind,))
+            # It's a knit record, it has a _raw_record field (even if it was
+            # reconstituted from a network stream).
+            raw_data = record._raw_record
+            # read the entire thing
+            revision_id = record.key[-1]
+            content, _ = knit._parse_record(revision_id, raw_data)
+            if record.storage_kind == 'knit-delta-gz':
+                line_iterator = factory.get_linedelta_content(content)
+            elif record.storage_kind == 'knit-ft-gz':
+                line_iterator = factory.get_fulltext_content(content)
+            content_text_keys.update(find_text_keys(
+                [(line, revision_id) for line in line_iterator]))
+        revision_keys = [(r,) for r in revision_ids]
+        def _filtered_inv_stream():
+            source_vf = from_repo.inventories
+            stream = source_vf.get_record_stream(revision_keys,
+                                                 'unordered', True)
+            for record in stream:
+                if record.storage_kind == 'absent':
+                    raise errors.NoSuchRevision(from_repo, record.key)
+                find_text_keys_from_content(record)
+                yield record
+            self._text_keys = content_text_keys - parent_text_keys
+        return ('inventories', _filtered_inv_stream())
+
+    def _get_text_stream(self):
+        # Note: We know we don't have to handle adding root keys, because both
+        # the source and target have the identical network name.
+        text_stream = self.from_repository.texts.get_record_stream(
+            self._text_keys, self._text_fetch_order, False)
+        return ('texts', text_stream)
+
+    def get_stream(self, search):
+        revision_ids = search.get_keys()
+        for stream_info in self._fetch_revision_texts(revision_ids):
+            yield stream_info
+        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
+        yield self._get_filtered_inv_stream(revision_ids)
+        yield self._get_text_stream()
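
Reviewer sketch, not part of the diff: _filtered_inv_stream populates self._text_keys as a side effect, so it is only valid once the inventory stream has been fully consumed; that is why get_stream yields inventories before building the text stream. A minimal analogue of that side-effecting generator:

def scanning_stream(records, seen):
    for record in records:
        seen.add(record)   # side effect consumed by the next stage
        yield record

seen = set()
stream = scanning_stream(['a', 'b'], seen)
assert seen == set()        # nothing scanned yet: generators are lazy
list(stream)                # consume the stream
assert seen == set(['a', 'b'])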
[...]
 class RepositoryFormatPack(MetaDirRepositoryFormat):
     """Format logic for pack structured repositories.
[...]
         utf8_files = [('format', self.get_format_string())]
 
         self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
-        return self.open(a_bzrdir=a_bzrdir, _found=True)
+        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
+        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
+        return repository
[...]
     def open(self, a_bzrdir, _found=False, _override_transport=None):
         """See RepositoryFormat.open().
[...]
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"
[...]
     _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
 
-    def check_conversion_target(self, target_format):
-        if not target_format.rich_root_data:
-            raise errors.BadConversionTarget(
-                'Does not support rich root data.', target_format)
-        if not getattr(target_format, 'supports_tree_reference', False):
-            raise errors.BadConversionTarget(
-                'Does not support nested trees', target_format)
-
     def get_format_string(self):
         """See RepositoryFormat.get_format_string()."""
         return ("Bazaar development format 2 with subtree support "