bzr branch
http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
|
5557.1.7
by John Arbash Meinel
Merge in the bzr.dev 5582 |
1 |
# Copyright (C) 2008-2011 Canonical Ltd
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
2 |
#
|
|
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
3 |
# This program is free software; you can redistribute it and/or modify
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
4 |
# it under the terms of the GNU General Public License as published by
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
7 |
#
|
|
|
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
8 |
# This program is distributed in the hope that it will be useful,
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
|
3735.31.2
by John Arbash Meinel
Cleanup trailing whitespace, get test_source to pass by removing asserts. |
12 |
#
|
|
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
13 |
# You should have received a copy of the GNU General Public License
|
14 |
# along with this program; if not, write to the Free Software
|
|
|
3735.36.3
by John Arbash Meinel
Add the new address for FSF to the new files. |
15 |
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
0.17.1
by Robert Collins
Starting point. Interface tests hooked up and failing. |
16 |
|
17 |
"""Tests for group compression."""
|
|
18 |
||
19 |
import zlib |
|
20 |
||
|
6624
by Jelmer Vernooij
Merge Python3 porting work ('py3 pokes') |
21 |
from .. import ( |
|
5755.2.4
by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles |
22 |
config, |
|
3735.32.8
by John Arbash Meinel
Some tests for the LazyGroupCompressFactory |
23 |
errors, |
|
3735.32.7
by John Arbash Meinel
Implement partial decompression support. |
24 |
osutils, |
|
3735.31.1
by John Arbash Meinel
Bring the groupcompress plugin into the brisbane-core branch. |
25 |
tests, |
|
4465.2.3
by Aaron Bentley
Update to change redundant inserts into a warning. |
26 |
trace, |
|
6670.4.1
by Jelmer Vernooij
Update imports. |
27 |
)
|
28 |
from ..bzr import ( |
|
29 |
btree_index, |
|
30 |
groupcompress, |
|
|
6744
by Jelmer Vernooij
Merge lp:~jelmer/brz/move-errors-knit. |
31 |
knit, |
|
6670.4.1
by Jelmer Vernooij
Update imports. |
32 |
index as _mod_index, |
|
3735.32.20
by John Arbash Meinel
groupcompress now copies the blocks exactly as they were given. |
33 |
versionedfile, |
|
3735.31.1
by John Arbash Meinel
Bring the groupcompress plugin into the brisbane-core branch. |
34 |
)
|
|
6624
by Jelmer Vernooij
Merge Python3 porting work ('py3 pokes') |
35 |
from ..osutils import sha_string |
36 |
from .test__groupcompress import compiled_groupcompress_feature |
|
37 |
from .scenarios import load_tests_apply_scenarios |
|
|
5559.2.2
by Martin Pool
Change to using standard load_tests_apply_scenarios. |
38 |
|
39 |
||
40 |
def group_compress_implementation_scenarios():
    """Enumerate test scenarios, one per available GroupCompressor backend.

    Always includes the pure-Python compressor; adds the compiled (Pyrex/C)
    compressor when the extension is available.
    """
    result = [('python', {'compressor': groupcompress.PythonGroupCompressor})]
    if compiled_groupcompress_feature.available():
        result.append(
            ('C', {'compressor': groupcompress.PyrexGroupCompressor}))
    return result
# Hook for the unittest load_tests protocol: multiplies each test class by
# its 'scenarios' attribute so every compressor implementation is exercised.
load_tests = load_tests_apply_scenarios
|
|
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
51 |
|
52 |
||
|
0.25.2
by John Arbash Meinel
First cut at meta-info as text form. |
53 |
class TestGroupCompressor(tests.TestCase):
    """Base class providing chunk-comparison helpers for compressor tests."""

    def _chunks_to_repr_lines(self, chunks):
        # Join the chunks, split back into lines, and repr() each line so
        # non-ascii bytes render printably in assertion diffs.
        joined = b''.join(chunks)
        return '\n'.join(repr(line) for line in joined.split(b'\n'))

    def assertEqualDiffEncoded(self, expected, actual):
        """Compare the actual content to the expected content.

        :param expected: A group of chunks that we expect to see
        :param actual: The measured 'chunks'

        We will transform the chunks back into lines, and then run 'repr()'
        over them to handle non-ascii characters.
        """
        self.assertEqualDiff(self._chunks_to_repr_lines(expected),
                             self._chunks_to_repr_lines(actual))
|
69 |
||
70 |
||
71 |
class TestAllGroupCompressors(TestGroupCompressor):
    """Tests for GroupCompressor"""

    scenarios = group_compress_implementation_scenarios()
    compressor = None  # Set by scenario

    def test_empty_delta(self):
        comp = self.compressor()
        self.assertEqual([], comp.chunks)

    def test_one_nosha_delta(self):
        # diff against NUKK
        comp = self.compressor()
        sha1, start_point, end_point, _ = comp.compress(
            ('label',), [b'strange\ncommon\n'], None)
        self.assertEqual(sha_string(b'strange\ncommon\n'), sha1)
        expected = b'f\x0fstrange\ncommon\n'
        self.assertEqual(expected, b''.join(comp.chunks))
        self.assertEqual(0, start_point)
        self.assertEqual(len(expected), end_point)

    def test_empty_content(self):
        comp = self.compressor()
        # Adding empty bytes should return the 'null' record
        sha1, start_point, end_point, kind = comp.compress(
            ('empty',), [], None)
        self.assertEqual(0, start_point)
        self.assertEqual(0, end_point)
        self.assertEqual('fulltext', kind)
        self.assertEqual(groupcompress._null_sha1, sha1)
        self.assertEqual(0, comp.endpoint)
        self.assertEqual([], comp.chunks)
        # Even after adding some content
        comp.compress(('content',), [b'some\nbytes\n'], None)
        self.assertTrue(comp.endpoint > 0)
        sha1, start_point, end_point, kind = comp.compress(
            ('empty2',), [], None)
        self.assertEqual(0, start_point)
        self.assertEqual(0, end_point)
        self.assertEqual('fulltext', kind)
        self.assertEqual(groupcompress._null_sha1, sha1)

    def test_extract_from_compressor(self):
        # Knit fetching will try to reconstruct texts locally which results in
        # reading something that is in the compressor stream already.
        comp = self.compressor()
        sha1_1, _, _, _ = comp.compress(
            ('label',),
            [b'strange\ncommon long line\nthat needs a 16 byte match\n'],
            None)
        sha1_2, _, end_point, _ = comp.compress(
            ('newlabel',),
            [b'common long line\nthat needs a 16 byte match\ndifferent\n'],
            None)
        # get the first out
        self.assertEqual((b'strange\ncommon long line\n'
                          b'that needs a 16 byte match\n', sha1_1),
                         comp.extract(('label',)))
        # and the second
        self.assertEqual((b'common long line\nthat needs a 16 byte match\n'
                          b'different\n', sha1_2),
                         comp.extract(('newlabel',)))

    def test_pop_last(self):
        comp = self.compressor()
        comp.compress(('key1',), [b'some text\nfor the first entry\n'], None)
        expected = list(comp.chunks)
        comp.compress(('key2',), [b'some text\nfor the second entry\n'], None)
        # pop_last() must restore the chunk stream to its prior state.
        comp.pop_last()
        self.assertEqual(expected, comp.chunks)
|
141 |
||
|
0.25.2
by John Arbash Meinel
First cut at meta-info as text form. |
142 |
|
|
3735.40.4
by John Arbash Meinel
Factor out tests that rely on the exact bytecode. |
143 |
class TestPyrexGroupCompressor(TestGroupCompressor):
    """Byte-exact tests for the compiled (Pyrex) compressor."""

    _test_needs_features = [compiled_groupcompress_feature]
    compressor = groupcompress.PyrexGroupCompressor

    def test_stats(self):
        comp = self.compressor()
        # Three overlapping texts; the compressor should achieve roughly a
        # 1.9:1 ratio on them.
        for key, chunks in [
                (('label',),
                 [b'strange\n',
                  b'common very very long line\n',
                  b'plus more text\n']),
                (('newlabel',),
                 [b'common very very long line\n',
                  b'plus more text\n',
                  b'different\n',
                  b'moredifferent\n']),
                (('label3',),
                 [b'new\n',
                  b'common very very long line\n',
                  b'plus more text\n',
                  b'different\n',
                  b'moredifferent\n']),
                ]:
            comp.compress(key, chunks, None)
        self.assertAlmostEqual(1.9, comp.ratio(), 1)

    def test_two_nosha_delta(self):
        comp = self.compressor()
        sha1_1, _, _, _ = comp.compress(
            ('label',),
            [b'strange\ncommon long line\nthat needs a 16 byte match\n'],
            None)
        expected = list(comp.chunks)
        sha1_2, start_point, end_point, _ = comp.compress(
            ('newlabel',),
            [b'common long line\nthat needs a 16 byte match\ndifferent\n'],
            None)
        self.assertEqual(sha_string(b'common long line\n'
                                    b'that needs a 16 byte match\n'
                                    b'different\n'), sha1_2)
        expected.extend([
            # 'delta', delta length
            b'd\x0f',
            # source and target length
            b'\x36',
            # copy the line common
            b'\x91\x0a\x2c',  # copy, offset 0x0a, len 0x2c
            # add the line different, and the trailing newline
            b'\x0adifferent\n',  # insert 10 bytes
            ])
        self.assertEqualDiffEncoded(expected, comp.chunks)
        self.assertEqual(sum(map(len, expected)), end_point)

    def test_three_nosha_delta(self):
        # The first interesting test: make a change that should use lines from
        # both parents.
        comp = self.compressor()
        sha1_1, _, _, _ = comp.compress(
            ('label',),
            [b'strange\ncommon very very long line\nwith some extra text\n'],
            None)
        sha1_2, _, _, _ = comp.compress(
            ('newlabel',),
            [b'different\nmoredifferent\nand then some more\n'], None)
        expected = list(comp.chunks)
        sha1_3, start_point, end_point, _ = comp.compress(
            ('label3',),
            [b'new\ncommon very very long line\nwith some extra text\n',
             b'different\nmoredifferent\nand then some more\n'], None)
        self.assertEqual(
            sha_string(b'new\ncommon very very long line\n'
                       b'with some extra text\n'
                       b'different\nmoredifferent\nand then some more\n'),
            sha1_3)
        expected.extend([
            # 'delta', delta length
            b'd\x0b',
            # source and target length
            b'\x5f'
            # insert new
            b'\x03new',
            # Copy of first parent 'common' range
            b'\x91\x09\x31'  # copy, offset 0x09, 0x31 bytes
            # Copy of second parent 'different' range
            b'\x91\x3c\x2b'  # copy, offset 0x3c, 0x2b bytes
            ])
        self.assertEqualDiffEncoded(expected, comp.chunks)
        self.assertEqual(sum(map(len, expected)), end_point)
223 |
||
224 |
||
225 |
class TestPythonGroupCompressor(TestGroupCompressor):
    """Byte-exact tests for the pure-Python compressor."""

    compressor = groupcompress.PythonGroupCompressor

    def test_stats(self):
        comp = self.compressor()
        # Three overlapping texts; the compressor should achieve roughly a
        # 1.9:1 ratio on them.
        for key, chunks in [
                (('label',),
                 [b'strange\n',
                  b'common very very long line\n',
                  b'plus more text\n']),
                (('newlabel',),
                 [b'common very very long line\n',
                  b'plus more text\n',
                  b'different\n',
                  b'moredifferent\n']),
                (('label3',),
                 [b'new\n',
                  b'common very very long line\n',
                  b'plus more text\n',
                  b'different\n',
                  b'moredifferent\n']),
                ]:
            comp.compress(key, chunks, None)
        self.assertAlmostEqual(1.9, comp.ratio(), 1)

    def test_two_nosha_delta(self):
        comp = self.compressor()
        sha1_1, _, _, _ = comp.compress(
            ('label',),
            [b'strange\ncommon long line\nthat needs a 16 byte match\n'],
            None)
        expected = list(comp.chunks)
        sha1_2, start_point, end_point, _ = comp.compress(
            ('newlabel',),
            [b'common long line\nthat needs a 16 byte match\ndifferent\n'],
            None)
        self.assertEqual(sha_string(b'common long line\n'
                                    b'that needs a 16 byte match\n'
                                    b'different\n'), sha1_2)
        expected.extend([
            # 'delta', delta length
            b'd\x0f',
            # target length
            b'\x36',
            # copy the line common
            b'\x91\x0a\x2c',  # copy, offset 0x0a, len 0x2c
            # add the line different, and the trailing newline
            b'\x0adifferent\n',  # insert 10 bytes
            ])
        self.assertEqualDiffEncoded(expected, comp.chunks)
        self.assertEqual(sum(map(len, expected)), end_point)

    def test_three_nosha_delta(self):
        # The first interesting test: make a change that should use lines from
        # both parents.
        comp = self.compressor()
        sha1_1, _, _, _ = comp.compress(
            ('label',),
            [b'strange\ncommon very very long line\nwith some extra text\n'],
            None)
        sha1_2, _, _, _ = comp.compress(
            ('newlabel',),
            [b'different\nmoredifferent\nand then some more\n'], None)
        expected = list(comp.chunks)
        sha1_3, start_point, end_point, _ = comp.compress(
            ('label3',),
            [b'new\ncommon very very long line\nwith some extra text\n',
             b'different\nmoredifferent\nand then some more\n'], None)
        self.assertEqual(
            sha_string(b'new\ncommon very very long line\n'
                       b'with some extra text\n'
                       b'different\nmoredifferent\nand then some more\n'),
            sha1_3)
        expected.extend([
            # 'delta', delta length
            b'd\x0c',
            # target length
            b'\x5f'
            # insert new
            b'\x04new\n',
            # Copy of first parent 'common' range
            b'\x91\x0a\x30'  # copy, offset 0x0a, 0x30 bytes
            # Copy of second parent 'different' range
            b'\x91\x3c\x2b'  # copy, offset 0x3c, 0x2b bytes
            ])
        self.assertEqualDiffEncoded(expected, comp.chunks)
        self.assertEqual(sum(map(len, expected)), end_point)
303 |
||
304 |
||
|
0.25.2
by John Arbash Meinel
First cut at meta-info as text form. |
305 |
class TestGroupCompressBlock(tests.TestCase): |
306 |
||
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
307 |
def make_block(self, key_to_text):
    """Create a GroupCompressBlock, filling it with the given texts.

    :param key_to_text: Dict mapping key tuples to the byte string stored
        under each key. Keys are added in sorted order.
    :return: A (locs, block) tuple where locs maps each key to its
        (start, end) byte range inside the block, and block is a
        GroupCompressBlock round-tripped through from_bytes(to_bytes()).
    """
    compressor = groupcompress.GroupCompressor()
    for key in sorted(key_to_text):
        compressor.compress(key, [key_to_text[key]], None)
    # labels_deltas values are 4-tuples; only the first and third elements
    # (the start/end byte offsets) are needed here.
    # NOTE: the original also assigned a dead `start = 0` local, shadowed by
    # the comprehension's own unpacking — removed.
    locs = {key: (start, end)
            for key, (start, _, end, _) in compressor.labels_deltas.items()}
    block = compressor.flush()
    raw_bytes = block.to_bytes()
    # Go through from_bytes(to_bytes()) so that we start with a compressed
    # content object
    return locs, groupcompress.GroupCompressBlock.from_bytes(raw_bytes)
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
320 |
|
|
0.25.2
by John Arbash Meinel
First cut at meta-info as text form. |
321 |
def test_from_empty_bytes(self):
    # An empty byte string is not a valid serialised block.
    self.assertRaises(
        ValueError, groupcompress.GroupCompressBlock.from_bytes, b'')
|
0.25.2
by John Arbash Meinel
First cut at meta-info as text form. |
324 |
|
|
0.25.4
by John Arbash Meinel
We at least have the rudimentary ability to encode and decode values. |
325 |
def test_from_minimal_bytes(self):
    # A header declaring zero-length compressed and uncompressed content.
    blk = groupcompress.GroupCompressBlock.from_bytes(b'gcb1z\n0\n0\n')
    self.assertIsInstance(blk, groupcompress.GroupCompressBlock)
    self.assertIs(None, blk._content)
    self.assertEqual(b'', blk._z_content)
    blk._ensure_content()
    self.assertEqual(b'', blk._content)
    self.assertEqual(b'', blk._z_content)
    blk._ensure_content()  # Ensure content is safe to call 2x
|
0.25.4
by John Arbash Meinel
We at least have the rudimentary ability to encode and decode values. |
335 |
|
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
336 |
def test_from_invalid(self):
    # Garbage input must be rejected rather than parsed.
    self.assertRaises(
        ValueError, groupcompress.GroupCompressBlock.from_bytes,
        b'this is not a valid header')
|
4241.6.6
by Robert Collins, John Arbash Meinel, Ian Clathworthy, Vincent Ladeuil
Groupcompress from brisbane-core. |
340 |
|
|
3735.38.4
by John Arbash Meinel
Another disk format change. |
341 |
def test_from_bytes(self):
    content = b'a tiny bit of content\n'
    z_content = zlib.compress(content)
    # Serialised form: marker line, compressed length, uncompressed length,
    # then the compressed payload itself.
    header = b'gcb1z\n%d\n%d\n' % (len(z_content), len(content))
    blk = groupcompress.GroupCompressBlock.from_bytes(header + z_content)
    # Parsing is lazy: the compressed bytes are retained but not expanded.
    self.assertEqual(z_content, blk._z_content)
    self.assertIs(None, blk._content)
    self.assertEqual(len(z_content), blk._z_content_length)
    self.assertEqual(len(content), blk._content_length)
    blk._ensure_content()
    self.assertEqual(z_content, blk._z_content)
    self.assertEqual(content, blk._content)
359 |
||
|
5439.2.1
by John Arbash Meinel
Change GroupCompressBlock to work in self._z_compress_chunks |
360 |
def test_to_chunks(self):
    chunks_in = [b'this is some content\n',
                 b'this content will be compressed\n']
    length = sum(len(c) for c in chunks_in)
    content = b''.join(chunks_in)
    gcb = groupcompress.GroupCompressBlock()
    gcb.set_chunked_content(chunks_in, length)
    total_len, block_chunks = gcb.to_chunks()
    block_bytes = b''.join(block_chunks)
    self.assertEqual(gcb._z_content_length, len(gcb._z_content))
    self.assertEqual(total_len, len(block_bytes))
    self.assertEqual(gcb._content_length, length)
    expected_header = (b'gcb1z\n'  # group compress block v1 zlib
                       b'%d\n'  # Length of compressed content
                       b'%d\n'  # Length of uncompressed content
                       ) % (gcb._z_content_length, gcb._content_length)
    # The first chunk should be the header chunk. It is small, fixed size,
    # and there is no compelling reason to split it up
    self.assertEqual(expected_header, block_chunks[0])
    self.assertStartsWith(block_bytes, expected_header)
    remainder = block_bytes[len(expected_header):]
    self.assertEqual(content, zlib.decompress(remainder))
|
383 |
||
    def test_to_bytes(self):
        """to_bytes() serializes as header + zlib stream, and set_content()
        and set_chunked_content() produce byte-identical serializations.
        """
        content = (b'this is some content\n'
                   b'this content will be compressed\n')
        gcb = groupcompress.GroupCompressBlock()
        gcb.set_content(content)
        data = gcb.to_bytes()
        self.assertEqual(gcb._z_content_length, len(gcb._z_content))
        self.assertEqual(gcb._content_length, len(content))
        expected_header = (b'gcb1z\n'  # group compress block v1 zlib
                           b'%d\n'  # Length of compressed content
                           b'%d\n'  # Length of uncompressed content
                           ) % (gcb._z_content_length, gcb._content_length)
        self.assertStartsWith(data, expected_header)
        remaining_bytes = data[len(expected_header):]
        # The payload after the header must be one zlib stream of the content.
        raw_bytes = zlib.decompress(remaining_bytes)
        self.assertEqual(content, raw_bytes)

        # we should get the same results if using the chunked version
        gcb = groupcompress.GroupCompressBlock()
        gcb.set_chunked_content([b'this is some content\n'
                                 b'this content will be compressed\n'],
                                len(content))
        old_data = data
        data = gcb.to_bytes()
        self.assertEqual(old_data, data)
    def test_partial_decomp(self):
        """_ensure_content() decompresses lazily, only as far as requested,
        and finalizes the decompressor once the whole content is extracted.
        """
        content_chunks = []
        # We need a sufficient amount of data so that zlib.decompress has
        # partial decompression to work with. Most auto-generated data
        # compresses a bit too well, we want a combination, so we combine a sha
        # hash with compressible data.
        for i in range(2048):
            next_content = b'%d\nThis is a bit of duplicate text\n' % (i,)
            content_chunks.append(next_content)
            next_sha1 = osutils.sha_string(next_content)
            content_chunks.append(next_sha1 + b'\n')
        content = b''.join(content_chunks)
        # Fixed sizes pin the deterministic test data (and zlib's output).
        self.assertEqual(158634, len(content))
        z_content = zlib.compress(content)
        self.assertEqual(57182, len(z_content))
        block = groupcompress.GroupCompressBlock()
        # Hand-assemble a block directly around the raw zlib stream.
        block._z_content_chunks = (z_content,)
        block._z_content_length = len(z_content)
        block._compressor_name = 'zlib'
        block._content_length = 158634
        self.assertIs(None, block._content)
        block._ensure_content(100)
        self.assertIsNot(None, block._content)
        # We have decompressed at least 100 bytes
        self.assertTrue(len(block._content) >= 100)
        # We have not decompressed the whole content
        self.assertTrue(len(block._content) < 158634)
        self.assertEqualDiff(content[:len(block._content)], block._content)
        # ensuring content that we already have shouldn't cause any more data
        # to be extracted
        cur_len = len(block._content)
        block._ensure_content(cur_len - 10)
        self.assertEqual(cur_len, len(block._content))
        # Now we want a bit more content
        cur_len += 10
        block._ensure_content(cur_len)
        self.assertTrue(len(block._content) >= cur_len)
        self.assertTrue(len(block._content) < 158634)
        self.assertEqualDiff(content[:len(block._content)], block._content)
        # And now lets finish
        block._ensure_content(158634)
        self.assertEqualDiff(content, block._content)
        # And the decompressor is finalized
        self.assertIs(None, block._z_content_decompressor)
    def test__ensure_all_content(self):
        """Requesting the full length in a single _ensure_content() call
        decompresses everything and releases the incremental decompressor.
        """
        content_chunks = []
        # We need a sufficient amount of data so that zlib.decompress has
        # partial decompression to work with. Most auto-generated data
        # compresses a bit too well, we want a combination, so we combine a sha
        # hash with compressible data.
        for i in range(2048):
            next_content = b'%d\nThis is a bit of duplicate text\n' % (i,)
            content_chunks.append(next_content)
            next_sha1 = osutils.sha_string(next_content)
            content_chunks.append(next_sha1 + b'\n')
        content = b''.join(content_chunks)
        # Same deterministic fixture as test_partial_decomp.
        self.assertEqual(158634, len(content))
        z_content = zlib.compress(content)
        self.assertEqual(57182, len(z_content))
        block = groupcompress.GroupCompressBlock()
        # Hand-assemble a block directly around the raw zlib stream.
        block._z_content_chunks = (z_content,)
        block._z_content_length = len(z_content)
        block._compressor_name = 'zlib'
        block._content_length = 158634
        self.assertIs(None, block._content)
        # The first _ensure_content got all of the required data
        block._ensure_content(158634)
        self.assertEqualDiff(content, block._content)
        # And we should have released the _z_content_decompressor since it was
        # fully consumed
        self.assertIs(None, block._z_content_decompressor)
    def test__dump(self):
        """_dump() renders the block's records in human-readable form."""
        dup_content = b'some duplicate content\nwhich is sufficiently long\n'
        key_to_text = {(b'1',): dup_content + b'1 unique\n',
                       (b'2',): dup_content + b'2 extra special\n'}
        locs, block = self.make_block(key_to_text)
        # First record is a fulltext (b'f'); the second is a delta (b'd') made
        # of a copy (b'c') of the duplicated prefix plus an insert (b'i') of
        # the unique suffix. NOTE(review): the offsets 21 and 2 depend on the
        # block's internal layout -- confirm against groupcompress._dump if
        # this ever changes.
        self.assertEqual([(b'f', len(key_to_text[(b'1',)])),
                          (b'd', 21, len(key_to_text[(b'2',)]),
                           [(b'c', 2, len(dup_content)),
                            (b'i', len(b'2 extra special\n'), b'')
                            ]),
                          ], block._dump())
class TestCaseWithGroupCompressVersionedFiles(
        tests.TestCaseWithMemoryTransport):
    """Base class supplying a helper to build a groupcompress VersionedFiles."""

    def make_test_vf(self, create_graph, keylength=1, do_cleanup=True,
                     dir='.', inconsistency_fatal=True):
        """Create a pack-backed groupcompress VersionedFiles under *dir*.

        :param create_graph: whether the index tracks a parent graph.
        :param do_cleanup: if True, register pack-group cleanup with the test.
        """
        transport = self.get_transport(dir)
        transport.ensure_base()
        factory = groupcompress.make_pack_factory(
            graph=create_graph, delta=False, keylength=keylength,
            inconsistency_fatal=inconsistency_fatal)
        vf = factory(transport)
        if do_cleanup:
            self.addCleanup(groupcompress.cleanup_pack_group, vf)
        return vf
class TestGroupCompressVersionedFiles(TestCaseWithGroupCompressVersionedFiles):
    """Direct tests of GroupCompressVersionedFiles behaviour."""

    def make_g_index(self, name, ref_lists=0, nodes=()):
        """Build a BTreeGraphIndex named *name* from (key, refs, value) nodes.

        ``nodes`` previously defaulted to a mutable ``[]``; the default is now
        the immutable ``()`` (behaviour is identical -- it is only iterated).
        """
        builder = btree_index.BTreeBuilder(ref_lists)
        for node, references, value in nodes:
            builder.add_node(node, references, value)
        stream = builder.finish()
        trans = self.get_transport()
        size = trans.put_file(name, stream)
        return btree_index.BTreeGraphIndex(trans, name, size)

    def make_g_index_missing_parent(self):
        """Return an index where b'tip' references an absent b'missing-parent'."""
        graph_index = self.make_g_index('missing_parent', 1,
                                        [((b'parent', ), b'2 78 2 10', ([],)),
                                         ((b'tip', ), b'2 78 2 10',
                                          ([(b'parent', ), (b'missing-parent', )],)),
                                         ])
        return graph_index

    def test_get_record_stream_as_requested(self):
        # Consider promoting 'as-requested' to general availability, and
        # make this a VF interface test
        vf = self.make_test_vf(False, dir='source')
        vf.add_lines((b'a',), (), [b'lines\n'])
        vf.add_lines((b'b',), (), [b'lines\n'])
        vf.add_lines((b'c',), (), [b'lines\n'])
        vf.add_lines((b'd',), (), [b'lines\n'])
        vf.writer.end()
        # 'as-requested' must preserve the exact key order we passed in.
        keys = [record.key for record in vf.get_record_stream(
            [(b'a',), (b'b',), (b'c',), (b'd',)],
            'as-requested', False)]
        self.assertEqual([(b'a',), (b'b',), (b'c',), (b'd',)], keys)
        keys = [record.key for record in vf.get_record_stream(
            [(b'b',), (b'a',), (b'd',), (b'c',)],
            'as-requested', False)]
        self.assertEqual([(b'b',), (b'a',), (b'd',), (b'c',)], keys)

        # It should work even after being repacked into another VF
        vf2 = self.make_test_vf(False, dir='target')
        vf2.insert_record_stream(vf.get_record_stream(
            [(b'b',), (b'a',), (b'd',), (b'c',)], 'as-requested', False))
        vf2.writer.end()

        keys = [record.key for record in vf2.get_record_stream(
            [(b'a',), (b'b',), (b'c',), (b'd',)],
            'as-requested', False)]
        self.assertEqual([(b'a',), (b'b',), (b'c',), (b'd',)], keys)
        keys = [record.key for record in vf2.get_record_stream(
            [(b'b',), (b'a',), (b'd',), (b'c',)],
            'as-requested', False)]
        self.assertEqual([(b'b',), (b'a',), (b'd',), (b'c',)], keys)

    def test_get_record_stream_max_bytes_to_index_default(self):
        """With no override, records expose the default compressor settings."""
        vf = self.make_test_vf(True, dir='source')
        vf.add_lines((b'a',), (), [b'lines\n'])
        vf.writer.end()
        record = next(vf.get_record_stream([(b'a',)], 'unordered', True))
        self.assertEqual(vf._DEFAULT_COMPRESSOR_SETTINGS,
                         record._manager._get_compressor_settings())

    def test_get_record_stream_accesses_compressor_settings(self):
        """An overridden _max_bytes_to_index is visible via the record manager."""
        vf = self.make_test_vf(True, dir='source')
        vf.add_lines((b'a',), (), [b'lines\n'])
        vf.writer.end()
        vf._max_bytes_to_index = 1234
        record = next(vf.get_record_stream([(b'a',)], 'unordered', True))
        self.assertEqual(dict(max_bytes_to_index=1234),
                         record._manager._get_compressor_settings())

    @staticmethod
    def grouped_stream(revision_ids, first_parents=()):
        """Yield fulltext records for revision_ids, chained parent-to-child."""
        parents = first_parents
        for revision_id in revision_ids:
            key = (revision_id,)
            record = versionedfile.FulltextContentFactory(
                key, parents, None,
                b'some content that is\n'
                b'identical except for\n'
                b'revision_id:%s\n' % (revision_id,))
            yield record
            parents = (key,)

    def test_insert_record_stream_reuses_blocks(self):
        vf = self.make_test_vf(True, dir='source')
        # One group, a-d
        vf.insert_record_stream(self.grouped_stream([b'a', b'b', b'c', b'd']))
        # Second group, e-h
        vf.insert_record_stream(self.grouped_stream(
            [b'e', b'f', b'g', b'h'], first_parents=((b'd',),)))
        block_bytes = {}
        stream = vf.get_record_stream(
            [(r.encode(),) for r in 'abcdefgh'], 'unordered', False)
        num_records = 0
        for record in stream:
            # The first record of each group carries the block; the rest are
            # refs into it.
            if record.key in [(b'a',), (b'e',)]:
                self.assertEqual('groupcompress-block', record.storage_kind)
            else:
                self.assertEqual('groupcompress-block-ref',
                                 record.storage_kind)
            block_bytes[record.key] = record._manager._block._z_content
            num_records += 1
        self.assertEqual(8, num_records)
        # assertIs: records in a group must share the very same bytes object.
        for r in 'abcd':
            key = (r.encode(),)
            self.assertIs(block_bytes[key], block_bytes[(b'a',)])
            self.assertNotEqual(block_bytes[key], block_bytes[(b'e',)])
        for r in 'efgh':
            key = (r.encode(),)
            self.assertIs(block_bytes[key], block_bytes[(b'e',)])
            self.assertNotEqual(block_bytes[key], block_bytes[(b'a',)])
        # Now copy the blocks into another vf, and ensure that the blocks are
        # preserved without creating new entries
        vf2 = self.make_test_vf(True, dir='target')
        keys = [(r.encode(),) for r in 'abcdefgh']
        # ordering in 'groupcompress' order, should actually swap the groups in
        # the target vf, but the groups themselves should not be disturbed.

        def small_size_stream():
            # Pretend each block is already "full enough" so insertion does
            # not rebuild/repack it on the fly.
            for record in vf.get_record_stream(keys, 'groupcompress', False):
                record._manager._full_enough_block_size = \
                    record._manager._block._content_length
                yield record

        vf2.insert_record_stream(small_size_stream())
        stream = vf2.get_record_stream(keys, 'groupcompress', False)
        vf2.writer.end()
        num_records = 0
        for record in stream:
            num_records += 1
            self.assertEqual(block_bytes[record.key],
                             record._manager._block._z_content)
        self.assertEqual(8, num_records)

    def test_insert_record_stream_packs_on_the_fly(self):
        vf = self.make_test_vf(True, dir='source')
        # One group, a-d
        vf.insert_record_stream(self.grouped_stream([b'a', b'b', b'c', b'd']))
        # Second group, e-h
        vf.insert_record_stream(self.grouped_stream(
            [b'e', b'f', b'g', b'h'], first_parents=((b'd',),)))
        # Now copy the blocks into another vf, and see that the
        # insert_record_stream rebuilt a new block on-the-fly because of
        # under-utilization
        vf2 = self.make_test_vf(True, dir='target')
        keys = [(r.encode(),) for r in 'abcdefgh']
        vf2.insert_record_stream(vf.get_record_stream(
            keys, 'groupcompress', False))
        stream = vf2.get_record_stream(keys, 'groupcompress', False)
        vf2.writer.end()
        num_records = 0
        # All of the records should be recombined into a single block
        block = None
        for record in stream:
            num_records += 1
            if block is None:
                block = record._manager._block
            else:
                self.assertIs(block, record._manager._block)
        self.assertEqual(8, num_records)

    def test__insert_record_stream_no_reuse_block(self):
        vf = self.make_test_vf(True, dir='source')
        # One group, a-d
        vf.insert_record_stream(self.grouped_stream([b'a', b'b', b'c', b'd']))
        # Second group, e-h
        vf.insert_record_stream(self.grouped_stream(
            [b'e', b'f', b'g', b'h'], first_parents=((b'd',),)))
        vf.writer.end()
        keys = [(r.encode(),) for r in 'abcdefgh']
        self.assertEqual(8, len(list(
            vf.get_record_stream(keys, 'unordered', False))))
        # Now copy the blocks into another vf, and ensure that the blocks are
        # preserved without creating new entries
        vf2 = self.make_test_vf(True, dir='target')
        # ordering in 'groupcompress' order, should actually swap the groups in
        # the target vf, but the groups themselves should not be disturbed.
        list(vf2._insert_record_stream(vf.get_record_stream(
            keys, 'groupcompress', False),
            reuse_blocks=False))
        vf2.writer.end()
        # After inserting with reuse_blocks=False, we should have everything in
        # a single new block.
        stream = vf2.get_record_stream(keys, 'groupcompress', False)
        block = None
        for record in stream:
            if block is None:
                block = record._manager._block
            else:
                self.assertIs(block, record._manager._block)

    def test_add_missing_noncompression_parent_unvalidated_index(self):
        unvalidated = self.make_g_index_missing_parent()
        combined = _mod_index.CombinedGraphIndex([unvalidated])
        index = groupcompress._GCGraphIndex(combined,
                                            is_locked=lambda: True, parents=True,
                                            track_external_parent_refs=True)
        index.scan_unvalidated_index(unvalidated)
        self.assertEqual(
            frozenset([(b'missing-parent',)]), index.get_missing_parents())

    def test_track_external_parent_refs(self):
        g_index = self.make_g_index('empty', 1, [])
        mod_index = btree_index.BTreeBuilder(1, 1)
        combined = _mod_index.CombinedGraphIndex([g_index, mod_index])
        index = groupcompress._GCGraphIndex(combined,
                                            is_locked=lambda: True, parents=True,
                                            add_callback=mod_index.add_nodes,
                                            track_external_parent_refs=True)
        index.add_records([
            ((b'new-key',), b'2 10 2 10', [((b'parent-1',), (b'parent-2',))])])
        self.assertEqual(
            frozenset([(b'parent-1',), (b'parent-2',)]),
            index.get_missing_parents())

    def make_source_with_b(self, a_parent, path):
        """Make a VF at *path* with b'a' and b'b'; link them iff a_parent."""
        source = self.make_test_vf(True, dir=path)
        source.add_lines((b'a',), (), [b'lines\n'])
        if a_parent:
            b_parents = ((b'a',),)
        else:
            b_parents = ()
        source.add_lines((b'b',), b_parents, [b'lines\n'])
        return source

    def do_inconsistent_inserts(self, inconsistency_fatal):
        """Insert b'b' twice with conflicting parent details into one target."""
        target = self.make_test_vf(True, dir='target',
                                   inconsistency_fatal=inconsistency_fatal)
        for x in range(2):
            source = self.make_source_with_b(x == 1, 'source%s' % x)
            target.insert_record_stream(source.get_record_stream(
                [(b'b',)], 'unordered', False))

    def test_inconsistent_redundant_inserts_warn(self):
        """Should not insert a record that is already present."""
        warnings = []

        def warning(template, args):
            warnings.append(template % args)
        # Patch trace.warning to capture the message; restored in finally.
        _trace_warning = trace.warning
        trace.warning = warning
        try:
            self.do_inconsistent_inserts(inconsistency_fatal=False)
        finally:
            trace.warning = _trace_warning
        self.assertContainsRe(
            "\n".join(warnings),
            r"^inconsistent details in skipped record: \(b?'b',\)"
            r" \(b?'42 32 0 8', \(\(\),\)\)"
            r" \(b?'74 32 0 8', \(\(\(b?'a',\),\),\)\)$")

    def test_inconsistent_redundant_inserts_raises(self):
        e = self.assertRaises(knit.KnitCorrupt, self.do_inconsistent_inserts,
                              inconsistency_fatal=True)
        self.assertContainsRe(str(e), r"Knit.* corrupt: inconsistent details"
                              r" in add_records:"
                              r" \(b?'b',\) \(b?'42 32 0 8', \(\(\),\)\)"
                              r" \(b?'74 32 0 8', \(\(\(b?'a',\),\),\)\)")

    def test_clear_cache(self):
        vf = self.make_source_with_b(True, 'source')
        vf.writer.end()
        # Reading records populates the group cache ...
        for record in vf.get_record_stream([(b'a',), (b'b',)], 'unordered',
                                           True):
            pass
        self.assertTrue(len(vf._group_cache) > 0)
        # ... and clear_cache() must empty it.
        vf.clear_cache()
        self.assertEqual(0, len(vf._group_cache))
|
5755.2.4
by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles |
780 |
class TestGroupCompressConfig(tests.TestCaseWithTransport): |
781 |
||
782 |
def make_test_vf(self): |
|
783 |
t = self.get_transport('.') |
|
784 |
t.ensure_base() |
|
785 |
factory = groupcompress.make_pack_factory(graph=True, |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
786 |
delta=False, keylength=1, inconsistency_fatal=True) |
|
5755.2.4
by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles |
787 |
vf = factory(t) |
788 |
self.addCleanup(groupcompress.cleanup_pack_group, vf) |
|
789 |
return vf |
|
790 |
||
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
791 |
def test_max_bytes_to_index_default(self): |
792 |
vf = self.make_test_vf() |
|
793 |
gc = vf._make_group_compressor() |
|
794 |
self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX, |
|
795 |
vf._max_bytes_to_index) |
|
796 |
if isinstance(gc, groupcompress.PyrexGroupCompressor): |
|
797 |
self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX, |
|
798 |
gc._delta_index._max_bytes_to_index) |
|
799 |
||
800 |
def test_max_bytes_to_index_in_config(self): |
|
801 |
c = config.GlobalConfig() |
|
802 |
c.set_user_option('bzr.groupcompress.max_bytes_to_index', '10000') |
|
803 |
vf = self.make_test_vf() |
|
804 |
gc = vf._make_group_compressor() |
|
805 |
self.assertEqual(10000, vf._max_bytes_to_index) |
|
806 |
if isinstance(gc, groupcompress.PyrexGroupCompressor): |
|
807 |
self.assertEqual(10000, gc._delta_index._max_bytes_to_index) |
|
808 |
||
809 |
def test_max_bytes_to_index_bad_config(self): |
|
810 |
c = config.GlobalConfig() |
|
811 |
c.set_user_option('bzr.groupcompress.max_bytes_to_index', 'boogah') |
|
|
5755.2.4
by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles |
812 |
vf = self.make_test_vf() |
813 |
# TODO: This is triggering a warning, we might want to trap and make
|
|
814 |
# sure it is readable.
|
|
815 |
gc = vf._make_group_compressor() |
|
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
816 |
self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX, |
817 |
vf._max_bytes_to_index) |
|
|
5755.2.4
by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles |
818 |
if isinstance(gc, groupcompress.PyrexGroupCompressor): |
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
819 |
self.assertEqual(vf._DEFAULT_MAX_BYTES_TO_INDEX, |
820 |
gc._delta_index._max_bytes_to_index) |
|
|
5755.2.4
by John Arbash Meinel
Expose the max_entries_per_source into GroupCompressVersionedFiles |
821 |
|
822 |
||
|
4634.3.20
by Andrew Bennetts
Some basic whitebox unit tests for _BatchingBlockFetcher. |
823 |
class StubGCVF(object): |
|
4634.3.21
by Andrew Bennetts
Direct tests now have complete line coverage of _BatchingBlockFetcher (except for the assertion). |
824 |
def __init__(self, canned_get_blocks=None): |
|
4634.3.20
by Andrew Bennetts
Some basic whitebox unit tests for _BatchingBlockFetcher. |
825 |
self._group_cache = {} |
|
4634.3.21
by Andrew Bennetts
Direct tests now have complete line coverage of _BatchingBlockFetcher (except for the assertion). |
826 |
self._canned_get_blocks = canned_get_blocks or [] |
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
827 |
|
|
4634.3.21
by Andrew Bennetts
Direct tests now have complete line coverage of _BatchingBlockFetcher (except for the assertion). |
828 |
def _get_blocks(self, read_memos): |
829 |
return iter(self._canned_get_blocks) |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
830 |
|
|
4634.3.20
by Andrew Bennetts
Some basic whitebox unit tests for _BatchingBlockFetcher. |
831 |
|
832 |
class Test_BatchingBlockFetcher(TestCaseWithGroupCompressVersionedFiles): |
|
833 |
"""Simple whitebox unit tests for _BatchingBlockFetcher.""" |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
834 |
|
|
4634.3.20
by Andrew Bennetts
Some basic whitebox unit tests for _BatchingBlockFetcher. |
835 |
def test_add_key_new_read_memo(self): |
836 |
"""Adding a key with an uncached read_memo new to this batch adds that |
|
837 |
read_memo to the list of memos to fetch.
|
|
838 |
"""
|
|
839 |
# locations are: index_memo, ignored, parents, ignored
|
|
840 |
# where index_memo is: (idx, offset, len, factory_start, factory_end)
|
|
841 |
# and (idx, offset, size) is known as the 'read_memo', identifying the
|
|
842 |
# raw bytes needed.
|
|
843 |
read_memo = ('fake index', 100, 50) |
|
844 |
locations = { |
|
845 |
('key',): (read_memo + (None, None), None, None, None)} |
|
846 |
batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locations) |
|
847 |
total_size = batcher.add_key(('key',)) |
|
848 |
self.assertEqual(50, total_size) |
|
849 |
self.assertEqual([('key',)], batcher.keys) |
|
850 |
self.assertEqual([read_memo], batcher.memos_to_get) |
|
851 |
||
852 |
def test_add_key_duplicate_read_memo(self): |
|
853 |
"""read_memos that occur multiple times in a batch will only be fetched |
|
854 |
once.
|
|
855 |
"""
|
|
856 |
read_memo = ('fake index', 100, 50) |
|
857 |
# Two keys, both sharing the same read memo (but different overall
|
|
858 |
# index_memos).
|
|
859 |
locations = { |
|
860 |
('key1',): (read_memo + (0, 1), None, None, None), |
|
861 |
('key2',): (read_memo + (1, 2), None, None, None)} |
|
862 |
batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), locations) |
|
863 |
total_size = batcher.add_key(('key1',)) |
|
864 |
total_size = batcher.add_key(('key2',)) |
|
865 |
self.assertEqual(50, total_size) |
|
866 |
self.assertEqual([('key1',), ('key2',)], batcher.keys) |
|
867 |
self.assertEqual([read_memo], batcher.memos_to_get) |
|
868 |
||
869 |
def test_add_key_cached_read_memo(self): |
|
870 |
"""Adding a key with a cached read_memo will not cause that read_memo |
|
871 |
to be added to the list to fetch.
|
|
872 |
"""
|
|
873 |
read_memo = ('fake index', 100, 50) |
|
874 |
gcvf = StubGCVF() |
|
875 |
gcvf._group_cache[read_memo] = 'fake block' |
|
876 |
locations = { |
|
877 |
('key',): (read_memo + (None, None), None, None, None)} |
|
878 |
batcher = groupcompress._BatchingBlockFetcher(gcvf, locations) |
|
879 |
total_size = batcher.add_key(('key',)) |
|
880 |
self.assertEqual(0, total_size) |
|
881 |
self.assertEqual([('key',)], batcher.keys) |
|
882 |
self.assertEqual([], batcher.memos_to_get) |
|
883 |
||
|
4634.3.21
by Andrew Bennetts
Direct tests now have complete line coverage of _BatchingBlockFetcher (except for the assertion). |
884 |
def test_yield_factories_empty(self): |
885 |
"""An empty batch yields no factories.""" |
|
886 |
batcher = groupcompress._BatchingBlockFetcher(StubGCVF(), {}) |
|
887 |
self.assertEqual([], list(batcher.yield_factories())) |
|
888 |
||
889 |
def test_yield_factories_calls_get_blocks(self): |
|
|
4634.3.22
by Andrew Bennetts
Fix docstring. |
890 |
"""Uncached memos are retrieved via get_blocks.""" |
|
4634.3.21
by Andrew Bennetts
Direct tests now have complete line coverage of _BatchingBlockFetcher (except for the assertion). |
891 |
read_memo1 = ('fake index', 100, 50) |
892 |
read_memo2 = ('fake index', 150, 40) |
|
893 |
gcvf = StubGCVF( |
|
894 |
canned_get_blocks=[ |
|
895 |
(read_memo1, groupcompress.GroupCompressBlock()), |
|
896 |
(read_memo2, groupcompress.GroupCompressBlock())]) |
|
897 |
locations = { |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
898 |
('key1',): (read_memo1 + (0, 0), None, None, None), |
899 |
('key2',): (read_memo2 + (0, 0), None, None, None)} |
|
|
4634.3.21
by Andrew Bennetts
Direct tests now have complete line coverage of _BatchingBlockFetcher (except for the assertion). |
900 |
batcher = groupcompress._BatchingBlockFetcher(gcvf, locations) |
901 |
batcher.add_key(('key1',)) |
|
902 |
batcher.add_key(('key2',)) |
|
903 |
factories = list(batcher.yield_factories(full_flush=True)) |
|
904 |
self.assertLength(2, factories) |
|
905 |
keys = [f.key for f in factories] |
|
906 |
kinds = [f.storage_kind for f in factories] |
|
907 |
self.assertEqual([('key1',), ('key2',)], keys) |
|
908 |
self.assertEqual(['groupcompress-block', 'groupcompress-block'], kinds) |
|
909 |
||
910 |
def test_yield_factories_flushing(self): |
|
911 |
"""yield_factories holds back on yielding results from the final block |
|
912 |
unless passed full_flush=True.
|
|
913 |
"""
|
|
914 |
fake_block = groupcompress.GroupCompressBlock() |
|
915 |
read_memo = ('fake index', 100, 50) |
|
916 |
gcvf = StubGCVF() |
|
917 |
gcvf._group_cache[read_memo] = fake_block |
|
918 |
locations = { |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
919 |
('key',): (read_memo + (0, 0), None, None, None)} |
|
4634.3.21
by Andrew Bennetts
Direct tests now have complete line coverage of _BatchingBlockFetcher (except for the assertion). |
920 |
batcher = groupcompress._BatchingBlockFetcher(gcvf, locations) |
921 |
batcher.add_key(('key',)) |
|
922 |
self.assertEqual([], list(batcher.yield_factories())) |
|
923 |
factories = list(batcher.yield_factories(full_flush=True)) |
|
924 |
self.assertLength(1, factories) |
|
925 |
self.assertEqual(('key',), factories[0].key) |
|
926 |
self.assertEqual('groupcompress-block', factories[0].storage_kind) |
|
927 |
||
|
4634.3.20
by Andrew Bennetts
Some basic whitebox unit tests for _BatchingBlockFetcher. |
928 |
|
|
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
929 |
class TestLazyGroupCompress(tests.TestCaseWithTransport): |
|
3735.32.8
by John Arbash Meinel
Some tests for the LazyGroupCompressFactory |
930 |
|
|
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
931 |
_texts = { |
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
932 |
(b'key1',): b"this is a text\n" |
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
933 |
b"with a reasonable amount of compressible bytes\n" |
934 |
b"which can be shared between various other texts\n", |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
935 |
(b'key2',): b"another text\n" |
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
936 |
b"with a reasonable amount of compressible bytes\n" |
937 |
b"which can be shared between various other texts\n", |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
938 |
(b'key3',): b"yet another text which won't be extracted\n" |
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
939 |
b"with a reasonable amount of compressible bytes\n" |
940 |
b"which can be shared between various other texts\n", |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
941 |
(b'key4',): b"this will be extracted\n" |
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
942 |
b"but references most of its bytes from\n" |
943 |
b"yet another text which won't be extracted\n" |
|
944 |
b"with a reasonable amount of compressible bytes\n" |
|
945 |
b"which can be shared between various other texts\n", |
|
|
3735.32.14
by John Arbash Meinel
Move the tests over to testing the LazyGroupContentManager object. |
946 |
}
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
947 |
|
|
3735.32.8
by John Arbash Meinel
Some tests for the LazyGroupCompressFactory |
948 |
def make_block(self, key_to_text): |
949 |
"""Create a GroupCompressBlock, filling it with the given texts.""" |
|
950 |
compressor = groupcompress.GroupCompressor() |
|
951 |
start = 0 |
|
952 |
for key in sorted(key_to_text): |
|
|
7459.2.1
by Jelmer Vernooij
Pass chunks to compress(). |
953 |
compressor.compress(key, [key_to_text[key]], None) |
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
954 |
locs = dict((key, (start, end)) for key, (start, _, end, _) |
|
6656.1.1
by Martin
Apply 2to3 dict fixer and clean up resulting mess using view helpers |
955 |
in compressor.labels_deltas.items()) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
956 |
block = compressor.flush() |
957 |
raw_bytes = block.to_bytes() |
|
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
958 |
return locs, groupcompress.GroupCompressBlock.from_bytes(raw_bytes) |
|
3735.32.8
by John Arbash Meinel
Some tests for the LazyGroupCompressFactory |
959 |
|
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
960 |
def add_key_to_manager(self, key, locations, block, manager): |
961 |
start, end = locations[key] |
|
962 |
manager.add_factory(key, (), start, end) |
|
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
963 |
|
|
4665.3.7
by John Arbash Meinel
We needed a bit more data to actually get groups doing delta-compression. |
964 |
def make_block_and_full_manager(self, texts): |
965 |
locations, block = self.make_block(texts) |
|
966 |
manager = groupcompress._LazyGroupContentManager(block) |
|
967 |
for key in sorted(texts): |
|
968 |
self.add_key_to_manager(key, locations, block, manager) |
|
969 |
return block, manager |
|
970 |
||
|
3735.32.8
by John Arbash Meinel
Some tests for the LazyGroupCompressFactory |
971 |
def test_get_fulltexts(self): |
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
972 |
locations, block = self.make_block(self._texts) |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
973 |
manager = groupcompress._LazyGroupContentManager(block) |
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
974 |
self.add_key_to_manager((b'key1',), locations, block, manager) |
975 |
self.add_key_to_manager((b'key2',), locations, block, manager) |
|
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
976 |
result_order = [] |
977 |
for record in manager.get_record_stream(): |
|
978 |
result_order.append(record.key) |
|
979 |
text = self._texts[record.key] |
|
980 |
self.assertEqual(text, record.get_bytes_as('fulltext')) |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
981 |
self.assertEqual([(b'key1',), (b'key2',)], result_order) |
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
982 |
|
983 |
# If we build the manager in the opposite order, we should get them
|
|
984 |
# back in the opposite order
|
|
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
985 |
manager = groupcompress._LazyGroupContentManager(block) |
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
986 |
self.add_key_to_manager((b'key2',), locations, block, manager) |
987 |
self.add_key_to_manager((b'key1',), locations, block, manager) |
|
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
988 |
result_order = [] |
989 |
for record in manager.get_record_stream(): |
|
990 |
result_order.append(record.key) |
|
991 |
text = self._texts[record.key] |
|
992 |
self.assertEqual(text, record.get_bytes_as('fulltext')) |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
993 |
self.assertEqual([(b'key2',), (b'key1',)], result_order) |
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
994 |
|
|
3735.32.16
by John Arbash Meinel
We now have a general header for the GC block. |
995 |
def test__wire_bytes_no_keys(self): |
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
996 |
locations, block = self.make_block(self._texts) |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
997 |
manager = groupcompress._LazyGroupContentManager(block) |
|
3735.32.16
by John Arbash Meinel
We now have a general header for the GC block. |
998 |
wire_bytes = manager._wire_bytes() |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
999 |
block_length = len(block.to_bytes()) |
|
3735.32.24
by John Arbash Meinel
_wire_bytes() now strips groups as necessary, as does _insert_record_stream |
1000 |
# We should have triggered a strip, since we aren't using any content
|
1001 |
stripped_block = manager._block.to_bytes() |
|
1002 |
self.assertTrue(block_length > len(stripped_block)) |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1003 |
empty_z_header = zlib.compress(b'') |
1004 |
self.assertEqual(b'groupcompress-block\n' |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
1005 |
b'8\n' # len(compress('')) |
1006 |
b'0\n' # len('') |
|
1007 |
b'%d\n' # compressed block len |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1008 |
b'%s' # zheader |
1009 |
b'%s' # block |
|
|
3735.32.24
by John Arbash Meinel
_wire_bytes() now strips groups as necessary, as does _insert_record_stream |
1010 |
% (len(stripped_block), empty_z_header, |
1011 |
stripped_block), |
|
1012 |
wire_bytes) |
|
|
3735.32.16
by John Arbash Meinel
We now have a general header for the GC block. |
1013 |
|
|
3735.32.15
by John Arbash Meinel
Change the GroupCompressBlock code to allow not recording 'end'. |
1014 |
def test__wire_bytes(self): |
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
1015 |
locations, block = self.make_block(self._texts) |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1016 |
manager = groupcompress._LazyGroupContentManager(block) |
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1017 |
self.add_key_to_manager((b'key1',), locations, block, manager) |
1018 |
self.add_key_to_manager((b'key4',), locations, block, manager) |
|
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1019 |
block_bytes = block.to_bytes() |
|
3735.32.16
by John Arbash Meinel
We now have a general header for the GC block. |
1020 |
wire_bytes = manager._wire_bytes() |
1021 |
(storage_kind, z_header_len, header_len, |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1022 |
block_len, rest) = wire_bytes.split(b'\n', 4) |
|
3735.32.16
by John Arbash Meinel
We now have a general header for the GC block. |
1023 |
z_header_len = int(z_header_len) |
1024 |
header_len = int(header_len) |
|
1025 |
block_len = int(block_len) |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1026 |
self.assertEqual(b'groupcompress-block', storage_kind) |
|
4665.3.8
by John Arbash Meinel
Of course, when you change the content, it can effect the stored wire bytes slightly. |
1027 |
self.assertEqual(34, z_header_len) |
1028 |
self.assertEqual(26, header_len) |
|
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1029 |
self.assertEqual(len(block_bytes), block_len) |
|
3735.32.16
by John Arbash Meinel
We now have a general header for the GC block. |
1030 |
z_header = rest[:z_header_len] |
1031 |
header = zlib.decompress(z_header) |
|
1032 |
self.assertEqual(header_len, len(header)) |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1033 |
entry1 = locations[(b'key1',)] |
1034 |
entry4 = locations[(b'key4',)] |
|
1035 |
self.assertEqualDiff(b'key1\n' |
|
1036 |
b'\n' # no parents |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
1037 |
b'%d\n' # start offset |
1038 |
b'%d\n' # end offset |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1039 |
b'key4\n' |
1040 |
b'\n' |
|
1041 |
b'%d\n' |
|
1042 |
b'%d\n' |
|
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
1043 |
% (entry1[0], entry1[1], |
1044 |
entry4[0], entry4[1]), |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
1045 |
header) |
|
3735.32.16
by John Arbash Meinel
We now have a general header for the GC block. |
1046 |
z_block = rest[z_header_len:] |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1047 |
self.assertEqual(block_bytes, z_block) |
1048 |
||
1049 |
def test_from_bytes(self): |
|
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
1050 |
locations, block = self.make_block(self._texts) |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1051 |
manager = groupcompress._LazyGroupContentManager(block) |
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1052 |
self.add_key_to_manager((b'key1',), locations, block, manager) |
1053 |
self.add_key_to_manager((b'key4',), locations, block, manager) |
|
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1054 |
wire_bytes = manager._wire_bytes() |
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1055 |
self.assertStartsWith(wire_bytes, b'groupcompress-block\n') |
|
3735.32.18
by John Arbash Meinel
We now support generating a network stream. |
1056 |
manager = groupcompress._LazyGroupContentManager.from_bytes(wire_bytes) |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1057 |
self.assertIsInstance(manager, groupcompress._LazyGroupContentManager) |
|
3735.38.2
by John Arbash Meinel
Make the text for key4 slightly longer, rather than include key3. |
1058 |
self.assertEqual(2, len(manager._factories)) |
|
3735.32.17
by John Arbash Meinel
We now round-trip the wire_bytes. |
1059 |
self.assertEqual(block._z_content, manager._block._z_content) |
1060 |
result_order = [] |
|
1061 |
for record in manager.get_record_stream(): |
|
1062 |
result_order.append(record.key) |
|
1063 |
text = self._texts[record.key] |
|
1064 |
self.assertEqual(text, record.get_bytes_as('fulltext')) |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1065 |
self.assertEqual([(b'key1',), (b'key4',)], result_order) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1066 |
|
1067 |
def test__check_rebuild_no_changes(self): |
|
|
4665.3.7
by John Arbash Meinel
We needed a bit more data to actually get groups doing delta-compression. |
1068 |
block, manager = self.make_block_and_full_manager(self._texts) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1069 |
manager._check_rebuild_block() |
1070 |
self.assertIs(block, manager._block) |
|
1071 |
||
1072 |
def test__check_rebuild_only_one(self): |
|
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
1073 |
locations, block = self.make_block(self._texts) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1074 |
manager = groupcompress._LazyGroupContentManager(block) |
1075 |
# Request just the first key, which should trigger a 'strip' action
|
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1076 |
self.add_key_to_manager((b'key1',), locations, block, manager) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1077 |
manager._check_rebuild_block() |
1078 |
self.assertIsNot(block, manager._block) |
|
1079 |
self.assertTrue(block._content_length > manager._block._content_length) |
|
1080 |
# We should be able to still get the content out of this block, though
|
|
1081 |
# it should only have 1 entry
|
|
1082 |
for record in manager.get_record_stream(): |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1083 |
self.assertEqual((b'key1',), record.key) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1084 |
self.assertEqual(self._texts[record.key], |
1085 |
record.get_bytes_as('fulltext')) |
|
1086 |
||
1087 |
def test__check_rebuild_middle(self): |
|
|
3735.40.18
by John Arbash Meinel
Get rid of the entries dict in GroupCompressBlock. |
1088 |
locations, block = self.make_block(self._texts) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1089 |
manager = groupcompress._LazyGroupContentManager(block) |
1090 |
# Request a small key in the middle should trigger a 'rebuild'
|
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1091 |
self.add_key_to_manager((b'key4',), locations, block, manager) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1092 |
manager._check_rebuild_block() |
1093 |
self.assertIsNot(block, manager._block) |
|
1094 |
self.assertTrue(block._content_length > manager._block._content_length) |
|
1095 |
for record in manager.get_record_stream(): |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1096 |
self.assertEqual((b'key4',), record.key) |
|
3735.32.23
by John Arbash Meinel
Add a _LazyGroupContentManager._check_rebuild_block |
1097 |
self.assertEqual(self._texts[record.key], |
1098 |
record.get_bytes_as('fulltext')) |
|
|
4665.3.7
by John Arbash Meinel
We needed a bit more data to actually get groups doing delta-compression. |
1099 |
|
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1100 |
def test_manager_default_compressor_settings(self): |
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1101 |
locations, old_block = self.make_block(self._texts) |
1102 |
manager = groupcompress._LazyGroupContentManager(old_block) |
|
1103 |
gcvf = groupcompress.GroupCompressVersionedFiles |
|
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1104 |
# It doesn't greedily evaluate _max_bytes_to_index
|
1105 |
self.assertIs(None, manager._compressor_settings) |
|
1106 |
self.assertEqual(gcvf._DEFAULT_COMPRESSOR_SETTINGS, |
|
1107 |
manager._get_compressor_settings()) |
|
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1108 |
|
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1109 |
def test_manager_custom_compressor_settings(self): |
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1110 |
locations, old_block = self.make_block(self._texts) |
1111 |
called = [] |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
1112 |
|
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1113 |
def compressor_settings(): |
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1114 |
called.append('called') |
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1115 |
return (10,) |
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1116 |
manager = groupcompress._LazyGroupContentManager(old_block, |
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
1117 |
get_compressor_settings=compressor_settings) |
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1118 |
gcvf = groupcompress.GroupCompressVersionedFiles |
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1119 |
# It doesn't greedily evaluate compressor_settings
|
1120 |
self.assertIs(None, manager._compressor_settings) |
|
1121 |
self.assertEqual((10,), manager._get_compressor_settings()) |
|
1122 |
self.assertEqual((10,), manager._get_compressor_settings()) |
|
1123 |
self.assertEqual((10,), manager._compressor_settings) |
|
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1124 |
# Only called 1 time
|
1125 |
self.assertEqual(['called'], called) |
|
1126 |
||
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1127 |
def test__rebuild_handles_compressor_settings(self): |
1128 |
if not isinstance(groupcompress.GroupCompressor, |
|
1129 |
groupcompress.PyrexGroupCompressor): |
|
1130 |
raise tests.TestNotApplicable('pure-python compressor' |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
1131 |
' does not handle compressor_settings') |
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1132 |
locations, old_block = self.make_block(self._texts) |
1133 |
manager = groupcompress._LazyGroupContentManager(old_block, |
|
|
7143.15.2
by Jelmer Vernooij
Run autopep8. |
1134 |
get_compressor_settings=lambda: dict(max_bytes_to_index=32)) |
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1135 |
gc = manager._make_group_compressor() |
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1136 |
self.assertEqual(32, gc._delta_index._max_bytes_to_index) |
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1137 |
self.add_key_to_manager((b'key3',), locations, old_block, manager) |
1138 |
self.add_key_to_manager((b'key4',), locations, old_block, manager) |
|
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1139 |
action, last_byte, total_bytes = manager._check_rebuild_action() |
1140 |
self.assertEqual('rebuild', action) |
|
1141 |
manager._rebuild_block() |
|
1142 |
new_block = manager._block |
|
1143 |
self.assertIsNot(old_block, new_block) |
|
|
5755.2.8
by John Arbash Meinel
Do a lot of renaming. |
1144 |
# Because of the new max_bytes_to_index, we do a poor job of
|
|
5755.2.5
by John Arbash Meinel
Expose the setting up the stack. |
1145 |
# rebuilding. This is a side-effect of the change, but at least it does
|
1146 |
# show the setting had an effect.
|
|
1147 |
self.assertTrue(old_block._content_length < new_block._content_length) |
|
1148 |
||
|
4665.3.7
by John Arbash Meinel
We needed a bit more data to actually get groups doing delta-compression. |
1149 |
def test_check_is_well_utilized_all_keys(self): |
1150 |
block, manager = self.make_block_and_full_manager(self._texts) |
|
1151 |
self.assertFalse(manager.check_is_well_utilized()) |
|
1152 |
# Though we can fake it by changing the recommended minimum size
|
|
1153 |
manager._full_enough_block_size = block._content_length |
|
1154 |
self.assertTrue(manager.check_is_well_utilized()) |
|
1155 |
# Setting it just above causes it to fail
|
|
1156 |
manager._full_enough_block_size = block._content_length + 1 |
|
1157 |
self.assertFalse(manager.check_is_well_utilized()) |
|
1158 |
# Setting the mixed-block size doesn't do anything, because the content
|
|
1159 |
# is considered to not be 'mixed'
|
|
1160 |
manager._full_enough_mixed_block_size = block._content_length |
|
1161 |
self.assertFalse(manager.check_is_well_utilized()) |
|
1162 |
||
1163 |
def test_check_is_well_utilized_mixed_keys(self): |
|
1164 |
texts = {} |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1165 |
f1k1 = (b'f1', b'k1') |
1166 |
f1k2 = (b'f1', b'k2') |
|
1167 |
f2k1 = (b'f2', b'k1') |
|
1168 |
f2k2 = (b'f2', b'k2') |
|
1169 |
texts[f1k1] = self._texts[(b'key1',)] |
|
1170 |
texts[f1k2] = self._texts[(b'key2',)] |
|
1171 |
texts[f2k1] = self._texts[(b'key3',)] |
|
1172 |
texts[f2k2] = self._texts[(b'key4',)] |
|
|
4665.3.7
by John Arbash Meinel
We needed a bit more data to actually get groups doing delta-compression. |
1173 |
block, manager = self.make_block_and_full_manager(texts) |
1174 |
self.assertFalse(manager.check_is_well_utilized()) |
|
1175 |
manager._full_enough_block_size = block._content_length |
|
1176 |
self.assertTrue(manager.check_is_well_utilized()) |
|
1177 |
manager._full_enough_block_size = block._content_length + 1 |
|
1178 |
self.assertFalse(manager.check_is_well_utilized()) |
|
1179 |
manager._full_enough_mixed_block_size = block._content_length |
|
1180 |
self.assertTrue(manager.check_is_well_utilized()) |
|
1181 |
||
1182 |
def test_check_is_well_utilized_partial_use(self): |
|
1183 |
locations, block = self.make_block(self._texts) |
|
1184 |
manager = groupcompress._LazyGroupContentManager(block) |
|
1185 |
manager._full_enough_block_size = block._content_length |
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1186 |
self.add_key_to_manager((b'key1',), locations, block, manager) |
1187 |
self.add_key_to_manager((b'key2',), locations, block, manager) |
|
|
4665.3.7
by John Arbash Meinel
We needed a bit more data to actually get groups doing delta-compression. |
1188 |
# Just using the content from key1 and 2 is not enough to be considered
|
1189 |
# 'complete'
|
|
1190 |
self.assertFalse(manager.check_is_well_utilized()) |
|
1191 |
# However if we add key3, then we have enough, as we only require 75%
|
|
1192 |
# consumption
|
|
|
6803.2.1
by Martin
Make test_groupcompress.py correct for Python 3 |
1193 |
self.add_key_to_manager((b'key4',), locations, block, manager) |
|
4665.3.7
by John Arbash Meinel
We needed a bit more data to actually get groups doing delta-compression. |
1194 |
self.assertTrue(manager.check_is_well_utilized()) |
|
5365.4.1
by John Arbash Meinel
Find a case where we are wasting a bit of memory. |
1195 |
|
1196 |
||
1197 |
class Test_GCBuildDetails(tests.TestCase):
    """Tests for the compact _GCBuildDetails record type."""

    def test_acts_like_tuple(self):
        """_GCBuildDetails inlines data that used to be spread across a
        bunch of tuples; indexing must mimic the old 4-tuple layout."""
        details = groupcompress._GCBuildDetails(
            (('parent1',), ('parent2',)),
            ('INDEX', 10, 20, 0, 5))
        self.assertEqual(4, len(details))
        # [0] is the index memo, [1] the compression parent (always None
        # for groupcompress), [2] the parents, [3] the record details.
        self.assertEqual(('INDEX', 10, 20, 0, 5), details[0])
        self.assertEqual(None, details[1])
        self.assertEqual((('parent1',), ('parent2',)), details[2])
        self.assertEqual(('group', None), details[3])

    def test__repr__(self):
        """repr() shows the index memo followed by the parent keys."""
        details = groupcompress._GCBuildDetails(
            (('parent1',), ('parent2',)),
            ('INDEX', 10, 20, 0, 5))
        expected = ("_GCBuildDetails(('INDEX', 10, 20, 0, 5),"
                    " (('parent1',), ('parent2',)))")
        self.assertEqual(expected, repr(details))