    # individual graph knits in packs (inventories)
    # individual graph nocompression knits in packs (revisions)
    # plain text knits in packs (texts)
    len_one_scenarios = [
        ('weave-named', {
            'factory':make_versioned_files_factory(WeaveFile,
                ConstantMapper('inventory')),
            'support_partial_insertion': False,
            }),
        ('named-knit', {
            'factory':make_file_factory(False, ConstantMapper('revisions')),
            'support_partial_insertion': False,
            }),
        ('named-nograph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(False, False, 1),
            'support_partial_insertion': False,
            }),
        ('named-graph-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 1),
            'support_partial_insertion': True,
            }),
        ('named-graph-nodelta-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, False, 1),
            'support_partial_insertion': False,
            }),
        ('groupcompress-nograph', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(False, False, 1),
            'support_partial_insertion': False,
            }),
        ]
    len_two_scenarios = [
        ('weave-prefix', {
            'factory':make_versioned_files_factory(WeaveFile,
                PrefixMapper()),
            'support_partial_insertion': False,
            }),
        ('annotated-knit-escape', {
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
            'support_partial_insertion': False,
            }),
        ('plain-knit-pack', {
            'cleanup':cleanup_pack_knit,
            'factory':make_pack_factory(True, True, 2),
            'support_partial_insertion': True,
            }),
        ('groupcompress', {
            'cleanup':groupcompress.cleanup_pack_group,
            'factory':groupcompress.make_pack_factory(True, False, 1),
            'support_partial_insertion': False,
            }),
        ]
    scenarios = len_one_scenarios + len_two_scenarios
    return multiply_tests(to_adapt, scenarios, result)


def get_diamond_vf(f, trailing_eol=True, left_only=False):
    """Get a diamond graph to exercise deltas and merges.

    :param trailing_eol: If True, end the last line with \n.
    """
    # ...
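    # For orientation (diagram derived from the add_lines calls below, inside
    # the get_diamond_files helper): the ancestry built for every key prefix
    # forms a diamond:
    #
    #         origin
    #            |
    #          base
    #          /  \
    #       left  right
    #          \  /
    #         merged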
            result = [prefix + suffix for suffix in suffix_list]
            return result
    # ...
    # we loop over each key because that spreads the inserts across prefixes,
    # which is how commit operates.
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('origin'), (),
            ['origin' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('base'),
            get_parents([('origin',)]), ['base' + last_char]))
    for prefix in prefixes:
        result.append(files.add_lines(prefix + get_key('left'),
            get_parents([('base',)]),
            ['base\n', 'left' + last_char]))
    if not left_only:
        for prefix in prefixes:
            result.append(files.add_lines(prefix + get_key('right'),
                get_parents([('base',)]),
                ['base\n', 'right' + last_char]))
        for prefix in prefixes:
            result.append(files.add_lines(prefix + get_key('merged'),
                get_parents([('left',), ('right',)]),
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
                self.assertEqual(expected, progress.updates)
            return lines
        lines = iter_with_versions(['child', 'otherchild'],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', 'child')] > 0)
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_versions(None, [('Walking content', 0, 5),
            ('Walking content', 1, 5),
            ('Walking content', 2, 5),
            ('Walking content', 3, 5),
            ('Walking content', 4, 5),
            ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', 'base')] > 0)
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)

        transport.mkdir('.')
        files = self.factory(transport)
        if self.cleanup is not None:
            self.addCleanup(self.cleanup, files)

    def get_simple_key(self, suffix):
        """Return a key for the object under test."""
        if self.key_length == 1:
            return (suffix,)
        else:
            return ('FileA',) + (suffix,)

    def test_add_lines(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f.add_lines(key0, [], ['a\n', 'b\n'])
        if self.graph:
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
        else:
            f.add_lines(key1, [], ['b\n', 'c\n'])
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test__add_text(self):
        f = self.get_versionedfiles()
        key0 = self.get_simple_key('r0')
        key1 = self.get_simple_key('r1')
        key2 = self.get_simple_key('r2')
        keyf = self.get_simple_key('foo')
        f._add_text(key0, [], 'a\nb\n')
        if self.graph:
            f._add_text(key1, [key0], 'b\nc\n')
        else:
            f._add_text(key1, [], 'b\nc\n')
        keys = f.keys()
        self.assertTrue(key0 in keys)
        self.assertTrue(key1 in keys)
        records = []
        for record in f.get_record_stream([key0, key1], 'unordered', True):
            records.append((record.key, record.get_bytes_as('fulltext')))
        records.sort()
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)

    def test_annotate(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        # ...
            self.assertRaises(RevisionNotPresent,
                files.annotate, prefix + ('missing-key',))

    def test_check_no_parameters(self):
        files = self.get_versionedfiles()

    def test_check_progressbar_parameter(self):
        """A progress bar can be supplied because check can be a generator."""
        pb = ui.ui_factory.nested_progress_bar()
        self.addCleanup(pb.finished)
        files = self.get_versionedfiles()
        files.check(progress_bar=pb)

    def test_check_with_keys_becomes_generator(self):
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys = files.keys()
        entries = files.check(keys=keys)
        seen = set()
        # Texts output should be fulltexts.
        self.capture_stream(files, entries, seen.add,
            files.get_parent_map(keys), require_fulltext=True)
        # All texts should be output.
        self.assertEqual(set(keys), seen)

    def test_clear_cache(self):
        files = self.get_versionedfiles()
        files.clear_cache()

    def test_construct(self):
        """Each parameterised test can be constructed on a transport."""
        files = self.get_versionedfiles()

    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
        nokeys=False):
        return get_diamond_files(files, self.key_length,
            trailing_eol=trailing_eol, nograph=not self.graph,
            left_only=left_only, nokeys=nokeys)

    def _add_content_nostoresha(self, add_lines):
        """When nostore_sha is supplied using old content raises."""
        vf = self.get_versionedfiles()
        empty_text = ('a', [])
        sample_text_nl = ('b', ["foo\n", "bar\n"])
        sample_text_no_nl = ('c', ["foo\n", "bar"])
        shas = []
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
            if add_lines:
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                    lines)
            else:
                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
                    ''.join(lines))
            shas.append(sha)
        # we now have a copy of all the lines in the vf.
        for sha, (version, lines) in zip(
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
            new_key = self.get_simple_key(version + "2")
            self.assertRaises(errors.ExistingContent,
                vf.add_lines, new_key, [], lines,
                nostore_sha=sha)
            self.assertRaises(errors.ExistingContent,
                vf._add_text, new_key, [], ''.join(lines),
                nostore_sha=sha)
            # and no new version should have been added.
            record = vf.get_record_stream([new_key], 'unordered', True).next()
            self.assertEqual('absent', record.storage_kind)

    def test_add_lines_nostoresha(self):
        self._add_content_nostoresha(add_lines=True)

    def test__add_text_nostoresha(self):
        self._add_content_nostoresha(add_lines=False)

    def test_add_lines_return(self):
        files = self.get_versionedfiles()
        # ...
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)

    def test_add_lines_no_key_generates_chk_key(self):
        files = self.get_versionedfiles()
        # save code by using the stock data insertion helper.
        adds = self.get_diamond_files(files, nokeys=True)
        results = []
        # We can only validate the first 2 elements returned from add_lines.
        for add in adds:
            self.assertEqual(3, len(add))
            results.append(add[:2])
        if self.key_length == 1:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
                ]),
                files.keys())
        elif self.key_length == 2:
            self.assertEqual([
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                results)
            # Check the added items got CHK keys.
            self.assertEqual(set([
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
                ]),
                files.keys())

    def test_empty_lines(self):
        """Empty files can be stored."""
        f = self.get_versionedfiles()
        # ...
        self.assertEqual('',
            f.get_record_stream([key_b], 'unordered', True
                ).next().get_bytes_as('fulltext'))

    def test_get_known_graph_ancestry(self):
        f = self.get_versionedfiles()
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        f.add_lines(key_a, [], ['\n'])
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_known_graph_with_fallbacks(self):
        f = self.get_versionedfiles('files')
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        if getattr(f, 'add_fallback_versioned_files', None) is None:
            raise TestNotApplicable("%s doesn't support fallbacks"
                % (f.__class__.__name__,))
        key_a = self.get_simple_key('a')
        key_b = self.get_simple_key('b')
        key_c = self.get_simple_key('c')
        # A only in fallback
        g = self.get_versionedfiles('fallback')
        g.add_lines(key_a, [], ['\n'])
        f.add_fallback_versioned_files(g)
        f.add_lines(key_b, [key_a], ['\n'])
        f.add_lines(key_c, [key_a, key_b], ['\n'])
        kg = f.get_known_graph_ancestry([key_c])
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))

    def test_get_record_stream_empty(self):
        """An empty stream can be requested without error."""
        f = self.get_versionedfiles()
        entries = f.get_record_stream([], 'unordered', False)
        self.assertEqual([], list(entries))

    def assertValidStorageKind(self, storage_kind):
        """Assert that storage_kind is a valid storage_kind."""
        self.assertSubset([storage_kind],
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
             'knit-delta-gz',
             'knit-delta-closure', 'knit-delta-closure-ref',
             'groupcompress-block', 'groupcompress-block-ref'])

    def capture_stream(self, f, entries, on_seen, parents,
        require_fulltext=False):
        """Capture a stream for testing."""
        for factory in entries:
            on_seen(factory.key)
            self.assertValidStorageKind(factory.storage_kind)
            if factory.sha1 is not None:
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
                    factory.sha1)
            self.assertEqual(parents[factory.key], factory.parents)
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
                str)
            if require_fulltext:
                factory.get_bytes_as('fulltext')

    def test_get_record_stream_interface(self):
        """each item in a stream has to provide a regular interface."""
        # ...
                [None, files.get_sha1s([factory.key])[factory.key]])
            self.assertEqual(parent_map[factory.key], factory.parents)
            # self.assertEqual(files.get_text(factory.key),
            ft_bytes = factory.get_bytes_as('fulltext')
            self.assertIsInstance(ft_bytes, str)
            chunked_bytes = factory.get_bytes_as('chunked')
            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))
        self.assertStreamOrder(sort_order, seen, keys)

    def test_get_record_stream_interface_groupcompress(self):
        """each item in a stream has to provide a regular interface."""
        files = self.get_versionedfiles()
        self.get_diamond_files(files)
        keys, sort_order = self.get_keys_and_groupcompress_sort_order()
        parent_map = files.get_parent_map(keys)
        entries = files.get_record_stream(keys, 'groupcompress', False)
        seen = []
        self.capture_stream(files, entries, seen.append, parent_map)
        self.assertStreamOrder(sort_order, seen, keys)

    def assertStreamOrder(self, sort_order, seen, keys):
        # ...
        entries = files.get_record_stream(keys, 'topological', False)
        self.assertAbsentRecord(files, keys, parent_map, entries)

    def assertRecordHasContent(self, record, bytes):
        """Assert that record has the given bytes as content."""
        self.assertEqual(bytes, record.get_bytes_as('fulltext'))
        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

    def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
        files = self.get_versionedfiles()
        key = self.get_simple_key('foo')
        files.add_lines(key, (), ['my text\n', 'content'])
        stream = files.get_record_stream([key], 'unordered', False)
        record = stream.next()
        if record.storage_kind in ('chunked', 'fulltext'):
            # chunked and fulltext representations are for direct use, not
            # wire serialisation: check they are able to be used directly. To
            # send such records over the wire, translation will be needed.
            self.assertRecordHasContent(record, "my text\ncontent")
        else:
            bytes = [record.get_bytes_as(record.storage_kind)]
            network_stream = versionedfile.NetworkRecordStream(bytes).read()
            source_record = record
            records = []
            for record in network_stream:
                records.append(record)
                self.assertEqual(source_record.storage_kind,
                    record.storage_kind)
                self.assertEqual(source_record.parents, record.parents)
                self.assertEqual(
                    source_record.get_bytes_as(source_record.storage_kind),
                    record.get_bytes_as(record.storage_kind))
            self.assertEqual(1, len(records))

    def assertStreamMetaEqual(self, records, expected, stream):
        """Assert that streams expected and stream have the same records.

        :param records: A list to collect the seen records.
        :return: A generator of the records in stream.
        """
        # We make assertions during copying to catch things early for
        # easier debugging.
        for record, ref_record in izip(stream, expected):
            records.append(record)
            self.assertEqual(ref_record.key, record.key)
            self.assertEqual(ref_record.storage_kind, record.storage_kind)
            self.assertEqual(ref_record.parents, record.parents)
            yield record

    def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
        stream):
        """Convert a stream to a bytes iterator.

        :param skipped_records: A list with one element to increment when a
            record is skipped because its full text is directly usable.
        :param full_texts: A dict from key->fulltext representation, for
            checking chunked or fulltext stored records.
        :param stream: A record_stream.
        :return: An iterator over the bytes of each record.
        """
        for record in stream:
            if record.storage_kind in ('chunked', 'fulltext'):
                skipped_records[0] += 1
                # check the content is correct for direct use.
                self.assertRecordHasContent(record, full_texts[record.key])
            else:
                yield record.get_bytes_as(record.storage_kind)

    def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key, key_delta], 'unordered', False)
        ref = files.get_record_stream([key, key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key: "my text\ncontent",
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire, thank you!
        self.assertEqual(2, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_native_formats_are_wire_ready_delta(self):
        # copy a delta over the wire
        files = self.get_versionedfiles()
        target_files = self.get_versionedfiles('target')
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        # Copy the basis text across so we can reconstruct the delta during
        # insertion into target.
        target_files.insert_record_stream(files.get_record_stream([key],
            'unordered', False))
        local = files.get_record_stream([key_delta], 'unordered', False)
        ref = files.get_record_stream([key_delta], 'unordered', False)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        target_files.insert_record_stream(
            self.assertStreamMetaEqual(records, ref, network_stream))
        # No duplicates on the wire, thank you!
        self.assertEqual(1, len(records) + skipped_records[0])
        if len(records):
            # if any content was copied it must all have been.
            self.assertIdenticalVersionedFile(files, target_files)

    def test_get_record_stream_wire_ready_delta_closure_included(self):
        # copy a delta over the wire with the ability to get its full text.
        files = self.get_versionedfiles()
        key = self.get_simple_key('ft')
        key_delta = self.get_simple_key('delta')
        files.add_lines(key, (), ['my text\n', 'content'])
        if self.graph:
            delta_parents = (key,)
        else:
            delta_parents = ()
        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
        local = files.get_record_stream([key_delta], 'unordered', True)
        ref = files.get_record_stream([key_delta], 'unordered', True)
        skipped_records = [0]
        full_texts = {
            key_delta: "different\ncontent\n",
            }
        byte_stream = self.stream_to_bytes_or_skip_counter(
            skipped_records, full_texts, local)
        network_stream = versionedfile.NetworkRecordStream(byte_stream).read()
        records = []
        # insert the stream from the network into a versioned files object so
        # we can check the content was carried across correctly without doing
        # delta inspection during check_stream.
        for record in self.assertStreamMetaEqual(records, ref, network_stream):
            # we have to be able to get the full text out:
            self.assertRecordHasContent(record, full_texts[record.key])
        # No duplicates on the wire, thank you!
        self.assertEqual(1, len(records) + skipped_records[0])

    def assertAbsentRecord(self, files, keys, parents, entries):
        """Helper for test_get_record_stream_missing_records_are_absent."""
        # ...
        self.assertIdenticalVersionedFile(source, files)

    def test_insert_record_stream_long_parent_chain_out_of_order(self):
        """An out of order stream can either error or work."""
        if not self.graph:
            raise TestNotApplicable('ancestry info only relevant with graph.')
        # Create a reasonably long chain of records based on each other, where
        # most will be deltas.
        source = self.get_versionedfiles('source')
        parents = ()
        keys = []
        content = [('same same %d\n' % n) for n in range(500)]
        for letter in 'abcdefghijklmnopqrstuvwxyz':
            key = ('key-' + letter,)
            if self.key_length == 2:
                key = ('prefix',) + key
            content.append('content for ' + letter + '\n')
            source.add_lines(key, parents, content)
            keys.append(key)
            parents = (key,)
        # Create a stream of these records, excluding the first record that the
        # rest ultimately depend upon, and insert it into a new vf.
        streams = []
        for key in reversed(keys):
            streams.append(source.get_record_stream([key], 'unordered', False))
        deltas = chain(*streams[:-1])
        files = self.get_versionedfiles()
        try:
            files.insert_record_stream(deltas)
        except RevisionNotPresent:
            # Must not have corrupted the file.
            files.check()
        else:
            # Must only report either just the first key as a missing parent,
            # or no key as missing (for nodelta scenarios).
            missing = set(files.get_missing_compression_parent_keys())
            missing.discard(keys[0])
            self.assertEqual(set(), missing)

    def get_knit_delta_source(self):
        """Get a source that can produce a stream with knit delta records,
        regardless of this test's scenario.
        """
        mapper = self.get_mapper()
        source_transport = self.get_transport('source')
        source_transport.mkdir('.')
        source = make_file_factory(False, mapper)(source_transport)
        get_diamond_files(source, self.key_length, trailing_eol=True,
            nograph=False, left_only=False)
        return source

    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. In the meantime a record missing its basis is
        not added.
        """
        source = self.get_knit_delta_source()
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
        entries = source.get_record_stream(keys, 'unordered', False)
        files = self.get_versionedfiles()
        if self.support_partial_insertion:
            self.assertEqual([],
                list(files.get_missing_compression_parent_keys()))
            files.insert_record_stream(entries)
            missing_bases = files.get_missing_compression_parent_keys()
            self.assertEqual(set([self.get_simple_key('left')]),
                set(missing_bases))
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
        else:
            self.assertRaises(
                errors.RevisionNotPresent, files.insert_record_stream, entries)

    def test_insert_record_stream_delta_missing_basis_can_be_added_later(self):
        """Insertion where a needed basis is not included notifies the caller
        of the missing basis. That basis can be added in a second
        insert_record_stream call that does not need to repeat records present
        in the previous stream. The record(s) that required that basis are
        fully inserted once their basis is no longer missing.
        """
        if not self.support_partial_insertion:
            raise TestNotApplicable(
                'versioned file scenario does not support partial insertion')
        source = self.get_knit_delta_source()
        entries = source.get_record_stream([self.get_simple_key('origin'),
            self.get_simple_key('merged')], 'unordered', False)
        files = self.get_versionedfiles()
        files.insert_record_stream(entries)
        missing_bases = files.get_missing_compression_parent_keys()
        self.assertEqual(set([self.get_simple_key('left')]),
            set(missing_bases))
        # 'merged' is inserted (although a commit of a write group involving
        # this versionedfiles would fail).
        merged_key = self.get_simple_key('merged')
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())
        # Add the full delta closure of the missing records
        missing_entries = source.get_record_stream(
            missing_bases, 'unordered', True)
        files.insert_record_stream(missing_entries)
        # Now 'merged' is fully inserted (and a commit would succeed).
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
        self.assertEqual(
            [merged_key], files.get_parent_map([merged_key]).keys())

    def test_iter_lines_added_or_present_in_keys(self):
        # test that we get at least an equal set of the lines added by
        # ...
        lines = iter_with_keys(
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
            [('Walking content', 0, 2),
             ('Walking content', 1, 2),
             ('Walking content', 2, 2)])
        # we must see child and otherchild
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
        self.assertTrue(
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
        # we don't care if we got more than that.

        # test all lines
        lines = iter_with_keys(files.keys(),
            [('Walking content', 0, 5),
             ('Walking content', 1, 5),
             ('Walking content', 2, 5),
             ('Walking content', 3, 5),
             ('Walking content', 4, 5),
             ('Walking content', 5, 5)])
        # all lines must be seen at least once
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
        self.assertTrue(
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)

        key = ('foo', 'bar',)
        files.add_lines(key, (), [])
        self.assertEqual(set([key]), set(files.keys()))


class VirtualVersionedFilesTests(TestCase):
    """Basic tests for the VirtualVersionedFiles implementations."""

    def _get_parent_map(self, keys):
        ret = {}
        for k in keys:
            if k in self._parent_map:
                ret[k] = self._parent_map[k]
        return ret

    def setUp(self):
        TestCase.setUp(self)
        self._lines = {}
        self._parent_map = {}
        self.texts = VirtualVersionedFiles(self._get_parent_map,
            self._lines.get)

    def test_add_lines(self):
        self.assertRaises(NotImplementedError,
            self.texts.add_lines, "foo", [], [])

    def test_add_mpdiffs(self):
        self.assertRaises(NotImplementedError,
            self.texts.add_mpdiffs, [])

    def test_check_noerrors(self):
        self.texts.check()

    def test_insert_record_stream(self):
        self.assertRaises(NotImplementedError, self.texts.insert_record_stream,
            iter([]))

    def test_get_sha1s_nonexistent(self):
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))

    def test_get_sha1s(self):
        self._lines["key"] = ["dataline1", "dataline2"]
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
            self.texts.get_sha1s([("key",)]))

    def test_get_parent_map(self):
        self._parent_map = {"G": ("A", "B")}
        self.assertEquals({("G",): (("A",),("B",))},
            self.texts.get_parent_map([("G",), ("L",)]))

    def test_get_record_stream(self):
        self._lines["A"] = ["FOO", "BAR"]
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("chunked", record.storage_kind)
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))

    def test_get_record_stream_absent(self):
        it = self.texts.get_record_stream([("A",)], "unordered", True)
        record = it.next()
        self.assertEquals("absent", record.storage_kind)

    def test_iter_lines_added_or_present_in_keys(self):
        self._lines["A"] = ["FOO", "BAR"]
        self._lines["B"] = ["HEY"]
        self._lines["C"] = ["Alberta"]
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
            sorted(list(it)))


class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):

    def get_ordering_vf(self, key_priority):
        builder = self.make_branch_builder('test')
        builder.start_series()
        builder.build_snapshot('A', None, [
            ('add', ('', 'TREE_ROOT', 'directory', None))])
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['B'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        vf = b.repository.inventories
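        # The decorator returns 'unordered' streams in key_priority order:
        # lower values first, with keys missing from the map sorting ahead of
        # everything else (the tests below assert exactly this behaviour).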
        return versionedfile.OrderingVersionedFilesDecorator(vf, key_priority)

    def test_get_empty(self):
        vf = self.get_ordering_vf({})
        self.assertEqual([], vf.calls)

    def test_get_record_stream_topological(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
            'topological', False)]
        # We should have gotten the keys in topological order
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
        # And recorded that the request was made
        self.assertEqual([('get_record_stream', request_keys, 'topological',
            False)], vf.calls)

    def test_get_record_stream_ordered(self):
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
            'unordered', False)]
        # They should be returned based on their priority
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
            False)], vf.calls)

    def test_get_record_stream_implicit_order(self):
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
        request_keys = [('B',), ('C',), ('D',), ('A',)]
        keys = [r.key for r in vf.get_record_stream(request_keys,
            'unordered', False)]
        # A and C are not in the map, so they get sorted to the front. A comes
        # before C alphabetically, so it comes back first
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
        # And the request recorded
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
            False)], vf.calls)