        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)

class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this
        index, which are not contained *in* this index. They may be present
        elsewhere.
        """
        return index.external_references(1)
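
    # Illustrative sketch, not part of the original class: the transport
    # layout that finish() walks a resumed pack through. The pack name
    # 'a1b2' is made up; the suffixes come from index_definitions.
    #
    #   upload/a1b2.rix  -> ../indices/a1b2.rix   (one rename per index type)
    #   upload/a1b2.pack -> ../packs/a1b2.pack    (the pack itself, last)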

class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # pack file.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index,
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = NewPack
    resumed_pack_factory = ResumedPack

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are
            written while they're being created.
        :param pack_transport: Addresses the directory of existing complete
            packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # resumed packs
        self._resumed_packs = []
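
    # Illustrative sketch, not original code: the shape of the _names dict
    # documented above. Keys are pack names, values are index sizes in bytes
    # in suffix-offset order (rev, inv, text, sig[, chk]); values invented.
    #
    #   self._names = {
    #       'a1b2c3d4': (2048, 1024, 4096, 512),
    #       'e5f6a7b8': (2048, 1024, 4096, 512, 256),
    #       }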

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def _add_text_to_weave(self, file_id, revision_id, new_lines, parents,
        nostore_sha, random_revid):
        file_id_index = GraphIndexPrefixAdapter(
            self.text_index.combined_index,
            (file_id, ), 1,
            add_nodes_callback=self.text_index.add_callback)
        self.repo._text_knit._index._graph_index = file_id_index
        self.repo._text_knit._index._add_callback = file_id_index.add_nodes
        return self.repo._text_knit.add_lines_with_ghosts(
            revision_id, parents, new_lines, nostore_sha=nostore_sha,
            random_id=random_revid, check_content=False)[0:2]

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result
    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = _packer_class(self, packs, '.autopack',
                                   reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        result = self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)
        return result
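
    # Illustrative sketch, not original code: the pack_operations structure
    # consumed above - a list of [revision_count, packs_to_combine] pairs,
    # with Pack objects replaced by placeholder names for brevity.
    #
    #   pack_operations = [
    #       [300, [pack_a, pack_b]],   # combine two packs, 300 revisions
    #       [0, []],                   # no-op entry; skipped by the loop
    #       ]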
    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
    def pack(self, hint=None):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            # This is arguably wrong because we might not be optimal, but for
            # now lets leave it in. (e.g. reconcile -> one pack. But not
            # repacking on every call.)
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        self._packs_by_name = {}
        self._packs_at_load = None

    def _make_index_map(self, index_suffix):
        """Return information on existing indices.

        :param index_suffix: Index suffix added to pack name.

        :returns: (pack_map, indices) where indices is a list of GraphIndex
            objects, and pack_map is a mapping from those objects to the
            pack tuple they describe.
        """
        # TODO: stop using this; it creates new indices unnecessarily.
        self.ensure_loaded()
        suffix_map = {'.rix': 'revision_index',
            '.six': 'signature_index',
            '.iix': 'inventory_index',
            '.tix': 'text_index',
            }
        return self._packs_list_to_pack_map_and_index_list(self.all_packs(),
            suffix_map[index_suffix])

    def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param packs: The packs list to process.
        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the same order
            as the packs list.
        """
        indices = []
        pack_map = {}
        for pack in packs:
            index = getattr(pack, index_attribute)
            indices.append(index)
            pack_map[index] = (pack.pack_transport, pack.file_name())
        return pack_map, indices

    def _index_contents(self, pack_map, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param pack_map: A map from indices to pack details.
        :param key_filter: An optional filter to limit the
            keys returned.
        """
        indices = [index for index in pack_map.iterkeys()]
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()
    def _diff_pack_names(self):
        """Read the pack names from disk, and compare them to those in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when
        # we locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified
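
    # A worked example of the set arithmetic above, not original code. Each
    # node is really ((name,), 'size size ...'); bare names are used here.
    #
    #   _packs_at_load = set(['A', 'B'])  # on disk when we took the lock
    #   current_nodes  = set(['B', 'C'])  # in memory now: A removed, C added
    #   disk_nodes     = set(['A', 'B', 'D'])  # re-read; D added elsewhere
    #
    #   deleted_nodes = _packs_at_load - current_nodes   # set(['A'])
    #   new_nodes     = current_nodes - _packs_at_load   # set(['C'])
    #   disk_nodes - deleted_nodes + new_nodes           # set(['B', 'C', 'D'])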
    def _save_pack_names(self, clear_obsolete_packs=False):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the list on disk and the current in-memory
        list is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :return: A list of the names saved that were not previously on disk.
        """
        self.lock_names()
        try:
            builder = self._index_builder_class()
            disk_nodes, deleted_nodes, new_nodes = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            # move the baseline forward
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                self._clear_obsolete_packs()
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        return [new_node[0][0] for new_node in new_nodes]
    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a band-aid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # out the new value.
        disk_nodes, _, _ = self._diff_pack_names()
        self._packs_at_load = disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self):
        """Delete everything from the obsolete-packs directory."""
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        for filename in obsolete_pack_transport.list_dir('.'):
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)
        if self.chk_index is not None:
            self.chk_index.add_writable_index(self._new_pack.chk_index,
                self._new_pack)
            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback
    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            try:
                self._new_pack.abort()
            finally:
                # XXX: If we aborted while in the middle of finishing the write
                # group, _remove_pack_indices can fail because the indexes are
                # already gone. If they're not there we shouldn't fail in this
                # case. -- mbp 20081113
                self._remove_pack_indices(self._new_pack)
                self._new_pack = None
        for resumed_pack in self._resumed_packs:
            try:
                resumed_pack.abort()
            finally:
                # See comment in previous finally block.
                try:
                    self._remove_pack_indices(resumed_pack)
                except KeyError:
                    pass
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]
    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # The base implementation does no checks. GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []
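
    # Illustrative sketch, not original code: the write group lifecycle that
    # _start/_commit/_abort_write_group implement, seen from a repository.
    #
    #   repo.lock_write()
    #   repo.start_write_group()    # _start_write_group(): opens a NewPack
    #   ... insert revisions, inventories, texts, signatures ...
    #   repo.commit_write_group()   # _commit_write_group(): finish() the
    #                               # pack, allocate() it, maybe autopack()
    #   repo.unlock()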
    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)

class KnitPackRevisionStore(KnitRevisionStore):
    """An object to adapt access from RevisionStore's to use KnitPacks.

    This class works by replacing the original RevisionStore.
    We need to do this because the KnitPackRevisionStore is less
    isolated in its layering - it uses services from the repo.
    """

    def __init__(self, repo, transport, revisionstore):
        """Create a KnitPackRevisionStore on repo with revisionstore.

        This will store its state in the Repository, use the
        indices to provide a KnitGraphIndex,
        and at the end of transactions write new indices.
        """
        KnitRevisionStore.__init__(self, revisionstore.versioned_file_store)
        self.repo = repo
        self._serializer = revisionstore._serializer
        self.transport = transport

    def get_revision_file(self, transaction):
        """Get the revision versioned file object."""
        if getattr(self.repo, '_revision_knit', None) is not None:
            return self.repo._revision_knit
        self.repo._pack_collection.ensure_loaded()
        add_callback = self.repo._pack_collection.revision_index.add_callback
        # setup knit specific objects
        knit_index = KnitGraphIndex(
            self.repo._pack_collection.revision_index.combined_index,
            add_callback=add_callback)
        self.repo._revision_knit = knit.KnitVersionedFile(
            'revisions', self.transport.clone('..'),
            self.repo.control_files._file_mode,
            create=False,
            index=knit_index, delta=False, factory=knit.KnitPlainFactory(),
            access_method=self.repo._pack_collection.revision_index.knit_access)
        return self.repo._revision_knit

    def get_signature_file(self, transaction):
        """Get the signature versioned file object."""
        if getattr(self.repo, '_signature_knit', None) is not None:
            return self.repo._signature_knit
        self.repo._pack_collection.ensure_loaded()
        add_callback = self.repo._pack_collection.signature_index.add_callback
        # setup knit specific objects
        knit_index = KnitGraphIndex(
            self.repo._pack_collection.signature_index.combined_index,
            add_callback=add_callback, parents=False)
        self.repo._signature_knit = knit.KnitVersionedFile(
            'signatures', self.transport.clone('..'),
            self.repo.control_files._file_mode,
            create=False,
            index=knit_index, delta=False, factory=knit.KnitPlainFactory(),
            access_method=self.repo._pack_collection.signature_index.knit_access)
        return self.repo._signature_knit


class KnitPackTextStore(VersionedFileStore):
    """Presents a TextStore abstraction on top of packs.

    This class works by replacing the original VersionedFileStore.
    We need to do this because the KnitPackRevisionStore is less
    isolated in its layering - it uses services from the repo and shares them
    with all the data written in a single write group.
    """

    def __init__(self, repo, transport, weavestore):
        """Create a KnitPackTextStore on repo with weavestore.

        This will store its state in the Repository, use the
        indices FileNames to provide a KnitGraphIndex,
        and at the end of transactions write new indices.
        """
        # don't call base class constructor - it's not suitable.
        # no transient data stored in the transaction
        # cache.
        self._precious = False
        self.repo = repo
        self.transport = transport
        self.weavestore = weavestore
        # XXX for check() which isn't updated yet
        self._transport = weavestore._transport

    def get_weave_or_empty(self, file_id, transaction):
        """Get a 'Knit' backed by the .tix indices.

        The transaction parameter is ignored.
        """
        self.repo._pack_collection.ensure_loaded()
        add_callback = self.repo._pack_collection.text_index.add_callback
        # setup knit specific objects
        file_id_index = GraphIndexPrefixAdapter(
            self.repo._pack_collection.text_index.combined_index,
            (file_id, ), 1, add_nodes_callback=add_callback)
        knit_index = KnitGraphIndex(file_id_index,
            add_callback=file_id_index.add_nodes,
            deltas=True, parents=True)
        return knit.KnitVersionedFile('text:' + file_id,
            self.transport.clone('..'),
            None,
            index=knit_index,
            access_method=self.repo._pack_collection.text_index.knit_access,
            factory=knit.KnitPlainFactory())

    get_weave = get_weave_or_empty

    def get_file_ids(self):
        """Generate a list of the fileids inserted, for use by check."""
        self.repo._pack_collection.ensure_loaded()
        ids = set()
        for index, key, value, refs in \
            self.repo._pack_collection.text_index.combined_index.iter_all_entries():
            ids.add(key[0])
        return ids


class InventoryKnitThunk(object):
    """An object to manage thunking get_inventory_weave to pack based knits."""

    def __init__(self, repo, transport):
        """Create an InventoryKnitThunk for repo at transport.

        This will store its state in the Repository, use the
        indices FileNames to provide a KnitGraphIndex,
        and at the end of transactions write a new index.
        """
        self.repo = repo
        self.transport = transport

    def get_weave(self):
        """Get a 'Knit' that contains inventory data."""
        self.repo._pack_collection.ensure_loaded()
        add_callback = self.repo._pack_collection.inventory_index.add_callback
        # setup knit specific objects
        knit_index = KnitGraphIndex(
            self.repo._pack_collection.inventory_index.combined_index,
            add_callback=add_callback, deltas=True, parents=True)
        return knit.KnitVersionedFile(
            'inventory', self.transport.clone('..'),
            self.repo.control_files._file_mode,
            create=False,
            index=knit_index, delta=True, factory=knit.KnitPlainFactory(),
            access_method=self.repo._pack_collection.inventory_index.knit_access)

class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic interpretation of the data it indexes.
    ===================================================
    """

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        KnitRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class,
            use_chk_index=self._format.supports_chks,
            )
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked,
                track_external_parent_refs=True),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        if _format.supports_chks:
            # No graph, no compression:- references from chks are between
            # different objects not temporal versions of the same; and without
            # some sort of temporal structure knit compression will just fail.
            self.chk_bytes = KnitVersionedFiles(
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
                    add_callback=self._pack_collection.chk_index.add_callback,
                    deltas=False, parents=False, is_locked=self.is_locked),
                data_access=self._pack_collection.chk_index.data_access,
                max_delta_chain=0)
        else:
            self.chk_bytes = None
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False
    def _warn_if_deprecated(self):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            from bzrlib import repository
            if repository._deprecation_warning_done:
                return
            repository._deprecation_warning_done = True
            warning("Format %s for %s is deprecated - please use"
                    " 'bzr upgrade --1.6.1-rich-root'"
                    % (self._format, self.bzrdir.transport.base))

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()
    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: a list of tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the
            # location in each index of the revision text so we can perform
            # linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result
    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph._StackedParentsProvider.get_parent_map

        This implementation accesses the combined revision index to provide
        answers.
        """
        self._pack_collection.ensure_loaded()
        index = self._pack_collection.revision_index.combined_index
        keys = set(keys)
        if None in keys:
            raise ValueError('get_parent_map(None) is not valid')
        if _mod_revision.NULL_REVISION in keys:
            keys.discard(_mod_revision.NULL_REVISION)
            found_parents = {_mod_revision.NULL_REVISION:()}
        else:
            found_parents = {}
        search_keys = set((revision_id,) for revision_id in keys)
        for index, key, value, refs in index.iter_entries(search_keys):
            parents = refs[0]
            if not parents:
                parents = (_mod_revision.NULL_REVISION,)
            else:
                parents = tuple(parent[0] for parent in parents)
            found_parents[key[0]] = parents
        return found_parents

    def has_revisions(self, revision_ids):
        """See Repository.has_revisions()."""
        revision_ids = set(revision_ids)
        result = revision_ids.intersection(
            set([None, _mod_revision.NULL_REVISION]))
        revision_ids.difference_update(result)
        index = self._pack_collection.revision_index.combined_index
        keys = [(revision_id,) for revision_id in revision_ids]
        result.update(node[1][0] for node in index.iter_entries(keys))
        return result
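
    # Illustrative usage, not original code: get_parent_map works on revision
    # ids and special-cases NULL_REVISION. Ids are hypothetical.
    #
    #   repo.get_parent_map(['rev-2', 'null:'])
    #   => {'rev-2': ('rev-1',), 'null:': ()}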
    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def get_inventory_weave(self):
        return self._inv_thunk.get_weave()

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
        self.control_files.unlock()

        if not self.is_locked():
            for repo in self._fallback_repositories:
                repo.unlock()

class KnitPackStreamSource(StreamSource):
    """A StreamSource used to transfer data between same-format KnitPack repos.

    This source assumes:
        1) Same serialization format for all objects
        2) Same root information
        3) XML format inventories
        4) Atomic inserts (so we can stream inventory texts before text
           content)
        5) No chk_bytes
    """

    def __init__(self, from_repository, to_format):
        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
        self._text_keys = None
        self._text_fetch_order = 'unordered'

    def _get_filtered_inv_stream(self, revision_ids):
        from_repo = self.from_repository
        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
        parent_keys = [(p,) for p in parent_ids]
        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
        parent_text_keys = set(find_text_keys(
            from_repo._inventory_xml_lines_for_keys(parent_keys)))
        content_text_keys = set()
        knit = KnitVersionedFiles(None, None)
        factory = KnitPlainFactory()
        def find_text_keys_from_content(record):
            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
                raise ValueError("Unknown content storage kind for"
                    " inventory text: %s" % (record.storage_kind,))
            # It's a knit record, it has a _raw_record field (even if it was
            # reconstituted from a network stream).
            raw_data = record._raw_record
            # read the entire thing
            revision_id = record.key[-1]
            content, _ = knit._parse_record(revision_id, raw_data)
            if record.storage_kind == 'knit-delta-gz':
                line_iterator = factory.get_linedelta_content(content)
            elif record.storage_kind == 'knit-ft-gz':
                line_iterator = factory.get_fulltext_content(content)
            content_text_keys.update(find_text_keys(
                [(line, revision_id) for line in line_iterator]))
        revision_keys = [(r,) for r in revision_ids]
        def _filtered_inv_stream():
            source_vf = from_repo.inventories
            stream = source_vf.get_record_stream(revision_keys,
                                                 'unordered', False)
            for record in stream:
                if record.storage_kind == 'absent':
                    raise errors.NoSuchRevision(from_repo, record.key)
                find_text_keys_from_content(record)
                yield record
            self._text_keys = content_text_keys - parent_text_keys
        return ('inventories', _filtered_inv_stream())

    def _get_text_stream(self):
        # Note: We know we don't have to handle adding root keys, because both
        # the source and target are the identical network name.
        text_stream = self.from_repository.texts.get_record_stream(
            self._text_keys, self._text_fetch_order, False)
        return ('texts', text_stream)

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_filtered_inv_stream(revision_ids)
        yield self._get_text_stream()
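
# Illustrative sketch, not original code: get_stream() yields named
# substreams in an order that satisfies the atomic-insert assumption -
# revision data first, then inventories, then texts.
#
#   for substream_name, records in source.get_stream(search):
#       # e.g. ('inventories', <record iterator>)
#       target_repo._insert_stream_piece(substream_name, records)
#
# where _insert_stream_piece is a hypothetical sink helper.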

class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

        return "Packs containing knits with rich root support\n"

class RepositoryFormatPackDevelopment0(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development0')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 0 (needs bzr.dev from before 1.3)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment0Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92-subtree.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development0-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 0 with subtree support "
            "(needs bzr.dev from before 1.3)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92-subtree\n")

class RepositoryFormatKnitPack5(RepositoryFormatPack):
    """Repository that supports external references to allow stacking.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.6')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"

class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
    """A repository with rich roots and stacking.

    New in release 1.6.1.

    Supports stacking on other repositories, allowing data to be accessed
    without being stored locally.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

    def get_format_description(self):
        return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"

class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
    """A repository with rich roots and external references.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.

    This format was deprecated because the serializer it uses accidentally
    supported subtrees, when the format was not intended to. This meant that
    someone could accidentally fetch from an incorrect repository.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees

    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        matching = bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')
        matching.repository_format = self
        return matching

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

    def get_format_description(self):
        return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
                " (deprecated)")

class RepositoryFormatKnitPack6(RepositoryFormatPack):
    """A repository with stacking and btree indexes,
    without rich roots or subtrees.

    This is equivalent to pack-1.6 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.9')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 6 (uses btree indexes, requires bzr 1.9)"

class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
    """A repository with rich roots, no subtrees, stacking and btree indexes.

    1.6-rich-root with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.9-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"

    def get_format_description(self):
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"

class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree[as it might have been] with B+Tree indices.

    This is [now] retained until we have a CHK based subtree format in
    development.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "
            "(needs bzr.dev from before 1.8)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1-subtree with B+Tree indices.\n")