DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
_STREAM_MIN_BUFFER_SIZE = 5 * 1024 * 1024


class KnitError(InternalBzrError):

    _fmt = "Knit error"


class KnitCorrupt(KnitError):

    _fmt = "Knit %(filename)s corrupt: %(how)s"

    def __init__(self, filename, how):
        KnitError.__init__(self)
        self.filename = filename
        self.how = how


class SHA1KnitCorrupt(KnitCorrupt):

    _fmt = ("Knit %(filename)s corrupt: sha-1 of reconstructed text does not "
            "match expected sha-1. key %(key)s expected sha %(expected)s actual "
            "sha %(actual)s")

    def __init__(self, filename, actual, expected, key, content):
        KnitError.__init__(self)
        self.filename = filename
        self.actual = actual
        self.expected = expected
        self.key = key
        self.content = content


class KnitDataStreamIncompatible(KnitError):
    # Not raised anymore, as we can convert data streams. In future we may
    # need it again for more exotic cases, so we're keeping it around for now.

    _fmt = "Cannot insert knit data stream of format \"%(stream_format)s\" into knit of format \"%(target_format)s\"."

    def __init__(self, stream_format, target_format):
        self.stream_format = stream_format
        self.target_format = target_format


class KnitDataStreamUnknown(KnitError):
    # Indicates a data stream we don't know how to handle.

    _fmt = "Cannot parse knit data stream of format \"%(stream_format)s\"."

    def __init__(self, stream_format):
        self.stream_format = stream_format


class KnitHeaderError(KnitError):

    _fmt = 'Knit header error: %(badline)r unexpected for file "%(filename)s".'

    def __init__(self, badline, filename):
        KnitError.__init__(self)
        self.badline = badline
        self.filename = filename


class KnitIndexUnknownMethod(KnitError):
    """Raised when we don't understand the storage method.

    Currently only 'fulltext' and 'line-delta' are supported.
    """

    _fmt = ("Knit index %(filename)s does not have a known method"
            " in options: %(options)r")

    def __init__(self, filename, options):
        KnitError.__init__(self)
        self.filename = filename
        self.options = options


class KnitAdapter(object):


class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory, target_storage_kind):
        if target_storage_kind != 'knit-ft-gz':
            raise errors.UnavailableRepresentation(
                factory.key, target_storage_kind, factory.storage_kind)
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, chunks = self._data._record_to_data(
            (rec[1],), rec[3], content.text())
        return b''.join(chunks)


class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory, target_storage_kind):
        if target_storage_kind != 'knit-delta-gz':
            raise errors.UnavailableRepresentation(
                factory.key, target_storage_kind, factory.storage_kind)
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, chunks = self._data._record_to_data((rec[1],), rec[3], contents)
        return b''.join(chunks)


class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to plain fulltexts."""

    def get_bytes(self, factory, target_storage_kind):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        if target_storage_kind == 'fulltext':
            return b''.join(content.text())
        elif target_storage_kind in ('chunked', 'lines'):
            return content.text()
        raise errors.UnavailableRepresentation(
            factory.key, target_storage_kind, factory.storage_kind)


class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter from annotated deltas to plain fulltexts."""

    def get_bytes(self, factory, target_storage_kind):
        annotated_compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = next(self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True))
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = basis_entry.get_bytes_as('lines')
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        if target_storage_kind == 'fulltext':
            return b''.join(basis_content.text())
        elif target_storage_kind in ('chunked', 'lines'):
            return basis_content.text()
        raise errors.UnavailableRepresentation(
            factory.key, target_storage_kind, factory.storage_kind)


class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to plain fulltexts."""

    def get_bytes(self, factory, target_storage_kind):
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        if target_storage_kind == 'fulltext':
            return b''.join(content.text())
        elif target_storage_kind in ('chunked', 'lines'):
            return content.text()
        raise errors.UnavailableRepresentation(
            factory.key, target_storage_kind, factory.storage_kind)


class DeltaPlainToFullText(KnitAdapter):
    """An adapter from plain deltas to plain fulltexts."""

    def get_bytes(self, factory, target_storage_kind):
        compressed_bytes = factory._raw_record
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = next(self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True))
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = basis_entry.get_bytes_as('lines')
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        if target_storage_kind == 'fulltext':
            return b''.join(content.text())
        elif target_storage_kind in ('chunked', 'lines'):
            return content.text()
        raise errors.UnavailableRepresentation(
            factory.key, target_storage_kind, factory.storage_kind)
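

# Illustrative sketch (not part of the original module): the adapters above are
# normally obtained from adapter_registry, keyed by (source storage kind,
# target storage kind), which is also what KnitContentFactory.get_bytes_as()
# does below. The 'record' argument is assumed to be a ContentFactory holding a
# raw knit record; 'basis_vf' is only needed for delta-to-fulltext conversions.
def _example_convert_record(record, target_kind, basis_vf=None):
    adapter_factory = adapter_registry.get((record.storage_kind, target_kind))
    adapter = adapter_factory(basis_vf)
    return adapter.get_bytes(record, target_kind)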


class KnitContentFactory(ContentFactory):

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            if self._network_bytes is None:
                self._create_network_bytes()
            return self._network_bytes
        if ('-ft-' in self.storage_kind
                and storage_kind in ('chunked', 'fulltext', 'lines')):
            adapter_key = (self.storage_kind, storage_kind)
            adapter_factory = adapter_registry.get(adapter_key)
            adapter = adapter_factory(None)
            return adapter.get_bytes(self, storage_kind)
        if self._knit is not None:
            # Not redundant with direct conversion above - that only handles
            # fulltext cases.
            if storage_kind in ('chunked', 'lines'):
                return self._knit.get_lines(self.key[0])
            elif storage_kind == 'fulltext':
                return self._knit.get_text(self.key[0])
        raise errors.UnavailableRepresentation(self.key, storage_kind,
                                               self.storage_kind)

    def iter_bytes_as(self, storage_kind):
        return iter(self.get_bytes_as(storage_kind))
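

# Illustrative sketch (not part of the original module): get_bytes_as() above
# serves 'fulltext' as a single bytes object and 'chunked'/'lines' as a list of
# byte lines, falling back to the adapter registry or the backing knit. The
# 'record' argument is assumed to be a factory yielded by get_record_stream().
def _example_fulltext_from_record(record):
    # Joining the 'lines' representation yields the same bytes as asking for
    # 'fulltext' directly.
    return b''.join(record.get_bytes_as('lines'))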


class LazyKnitContentFactory(ContentFactory):
        # loop to minimise any performance impact
        if plain:
            for header in lines:
                start, end, count = [int(n) for n in header.split(b',')]
                contents = [next(lines).split(b' ', 1)[1]
                            for _ in range(count)]
                result.append((start, end, count, contents))
        else:
            for header in lines:
                start, end, count = [int(n) for n in header.split(b',')]
                contents = [tuple(next(lines).split(b' ', 1))
                            for _ in range(count)]
                result.append((start, end, count, contents))
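        # Illustrative note (added commentary, not in the original source):
        # each hunk parsed above is serialised as a b"start,end,count\n"
        # header followed by `count` replacement lines; in annotated knits
        # every replacement line is prefixed by its origin revision and a
        # space, e.g.
        #
        #   b"1,2,1\n"
        #   b"rev-id-2 new second line\n"
        #
        # which replaces old lines [1:2) with the single new line shown.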

    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return (line.split(b' ', 1)[1] for line in lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        line_bytes = b''.join(lines)
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id,
            line_bytes=line_bytes)

    def add_content(self, content_factory, parent_texts=None,
                    left_matching_blocks=None, nostore_sha=None,
                    random_id=False):
        """See VersionedFiles.add_content()."""
        self._index._check_write_ok()
        key = content_factory.key
        parents = content_factory.parents
        self._check_add(key, None, random_id, check_content=False)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        lines = content_factory.get_bytes_as('lines')
        line_bytes = content_factory.get_bytes_as('fulltext')
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id,
            line_bytes=line_bytes)
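
    # Illustrative usage sketch (added commentary, not in the original class):
    # add_content() accepts any ContentFactory that can render itself as
    # 'lines' and 'fulltext', for example a ChunkedContentFactory built by the
    # caller. The import path and argument order below are assumptions.
    #
    #   from breezy.bzr.versionedfile import ChunkedContentFactory
    #   factory = ChunkedContentFactory(
    #       (b'rev-2',), ((b'rev-1',),), None, [b'hello\n', b'world\n'])
    #   knit_vf.add_content(factory)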

    def _add(self, key, lines, parents, parent_texts,
             left_matching_blocks, nostore_sha, random_id,
             line_bytes):
        """Add a set of lines on top of version specified by parents.

        Any versions not present will be converted into ghosts.
        if delta or (self._factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self._factory.annotated,
                left_matching_blocks)

        if delta:
            options.append(b'line-delta')
            store_lines = self._factory.lower_line_delta(delta_hunks)
            size, data = self._record_to_data(key, digest, store_lines)
        else:
            options.append(b'fulltext')
            # isinstance is slower and we have no hierarchy.
            if self._factory.__class__ is KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                dense_lines = [line_bytes]
                if lines and not lines[-1].endswith(b'\n'):
                    dense_lines.append(b'\n')
                size, data = self._record_to_data(key, digest,
                    lines, dense_lines)
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self._factory.lower_fulltext(content)
                size, data = self._record_to_data(key, digest, store_lines)

        access_memo = self._access.add_raw_record(key, size, data)
        self._index.add_records(
            ((key, options, access_memo, parents),),
            random_id=random_id)
                keys = set(remaining_keys)
                for content_factory in self._get_remaining_record_stream(keys,
                        ordering, include_delta_closure):
                    remaining_keys.discard(content_factory.key)
                    yield content_factory
                return
            except errors.RetryWithNewPacks as e:
                self._access.reload_or_raise(e)

    def _get_remaining_record_stream(self, keys, ordering,
                                     include_delta_closure):
        """This function is the 'retry' portion for get_record_stream."""
        if include_delta_closure:
            positions = self._get_components_positions(
                keys, allow_missing=True)
        else:
            build_details = self._index.get_build_details(keys)
            # map from key to
            # (record_details, access_memo, compression_parent_key)
            positions = dict((key, self._build_details_to_components(details))
                for key, details in viewitems(build_details))
        absent_keys = keys.difference(set(positions))
        # There may be more absent keys : if we're missing the basis component
        # and are trying to include the delta closure.
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        with gzip.GzipFile(mode='rb', fileobj=BytesIO(data)) as df:
            try:
                record_contents = df.readlines()
            except Exception as e:
                raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
                                  (data, e.__class__.__name__, str(e)))
            header = record_contents.pop(0)
            rec = self._split_header(header)
            last_line = record_contents.pop()
            if len(record_contents) != int(rec[2]):
                raise KnitCorrupt(self,
                                  'incorrect number of lines %s != %s'
                                  ' for version {%s} %s'
                                  % (len(record_contents), int(rec[2]),
                                     rec[1], record_contents))
            if last_line != b'end %s\n' % rec[1]:
                raise KnitCorrupt(self,
                                  'unexpected version end line %r, wanted %r'
                                  % (last_line, rec[1]))
        return rec, record_contents

    def _read_records_iter(self, records):

    def _record_to_data(self, key, digest, lines, dense_lines=None):
        """Convert key, digest, lines into a raw data block.

        :param key: The key of the record. Currently keys are always serialised
            using just the trailing component.
        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in
            \\n, dense_lines may be a list with one line in it, containing all
            the 1000's lines and their \\n's. Using dense_lines if it is
            already known is a win because the string join to create bytes in
            this function spends less time resizing the final string.
        :return: (len, chunked bytestring with compressed data)
        """
        chunks = [b"version %s %d %s\n" % (key[-1], len(lines), digest)]
        chunks.extend(dense_lines or lines)
        chunks.append(b"end " + key[-1] + b"\n")
        for chunk in chunks:
            if not isinstance(chunk, bytes):
                raise AssertionError(
                    'data must be plain bytes was %s' % type(chunk))
        if lines and not lines[-1].endswith(b'\n'):
            raise ValueError('corrupt lines value %r' % lines)
        compressed_chunks = tuned_gzip.chunks_to_gzip(chunks)
        return sum(map(len, compressed_chunks)), compressed_chunks
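
    # Illustrative note (added commentary, not in the original class): the
    # chunks produced by _record_to_data() gzip-compress a payload of the form
    #
    #   b"version <key-suffix> <line-count> <sha1>\n"
    #   ...the content lines...
    #   b"end <key-suffix>\n"
    #
    # which is what the record parser shown earlier expects when it checks the
    # line count and the trailing end marker.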

    def _split_header(self, line):
        rec = line.split()
        # one line with next ('' for None)
        # one line with byte count of the record bytes
        # the record bytes
        for key, (record_bytes, (method, noeol), next) in viewitems(
                self._raw_record_map):
            key_bytes = b'\x00'.join(key)
            parents = self.global_map.get(key, None)
            if parents is None:
                parent_bytes = b'None:'
            else:
                parent_bytes = b'\t'.join(b'\x00'.join(key) for key in parents)
            method_bytes = method.encode('ascii')
            if noeol:
                noeol_bytes = b"T"
            else:
                noeol_bytes = b"F"
            if next:
                next_bytes = b'\x00'.join(next)
            else:
                next_bytes = b''
            map_byte_list.append(b'\n'.join(
                [key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
                 b'%d' % len(record_bytes), record_bytes]))
        map_bytes = b''.join(map_byte_list)
        lines.append(map_bytes)
        bytes = b'\n'.join(lines)
        end = len(bytes)
        while start < end:
            # 1 line with key
            line_end = bytes.find(b'\n', start)
            key = tuple(bytes[start:line_end].split(b'\x00'))
            start = line_end + 1
            # 1 line with parents (None: for None, '' for ())
            line_end = bytes.find(b'\n', start)
            line = bytes[start:line_end]
            if line == b'None:':
                parents = None
            else:
                parents = tuple(
                    tuple(segment.split(b'\x00')) for segment in line.split(b'\t')
                    if segment)
            self.global_map[key] = parents
            start = line_end + 1
            # one line with method
            line_end = bytes.find(b'\n', start)
            line = bytes[start:line_end]
            method = line.decode('ascii')
            start = line_end + 1
            # one line with noeol
            line_end = bytes.find(b'\n', start)
            line = bytes[start:line_end]
            noeol = line == b"T"
            start = line_end + 1
            # one line with next (b'' for None)
            line_end = bytes.find(b'\n', start)
            line = bytes[start:line_end]
            if not line:
                next = None
            else:
                next = tuple(bytes[start:line_end].split(b'\x00'))
            start = line_end + 1
            # one line with byte count of the record bytes
            line_end = bytes.find(b'\n', start)
            line = bytes[start:line_end]
            count = int(line)
            start = line_end + 1
            # the record bytes
            record_bytes = bytes[start:start + count]
            start = start + count
            # put it in the map
            self._raw_record_map[key] = (record_bytes, (method, noeol), next)
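
        # Illustrative note (added commentary, not in the original source): a
        # single serialised entry, as produced by the matching serialiser
        # earlier, looks roughly like
        #
        #   key parts joined by NUL          b"file-id\x00rev-2\n"
        #   parents (b'None:' for None)      b"file-id\x00rev-1\n"
        #   method                           b"line-delta\n"
        #   noeol flag                       b"F\n"
        #   next build-parent ('' if None)   b"file-id\x00rev-1\n"
        #   byte count, then that many raw record bytes.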

    def _split_key(self, key):
        """Split key into a prefix and suffix."""
        # GZ 2018-07-03: This is intentionally either a sequence or bytes?
        if isinstance(key, bytes):
            return key[:-1], key[-1:]
        return key[:-1], key[-1]


class _KeyRefs(object):

    def __init__(self, track_new_keys=False):
        # dict mapping 'key' to 'set of keys referring to that key'
        self.refs = {}
        if track_new_keys:
            # set remembering all new keys
            self.new_keys = set()
        else:
            self.new_keys = None

    def clear(self):
        self.refs.clear()
        if self.new_keys:
            self.new_keys.clear()

    def add_references(self, key, refs):
        # Record the new references
        for referenced in refs:
            try:
                needed_by = self.refs[referenced]
            except KeyError:
                needed_by = self.refs[referenced] = set()
            needed_by.add(key)
        # Discard references satisfied by the new key
        self.add_key(key)

    def get_new_keys(self):
        return self.new_keys

    def get_unsatisfied_refs(self):
        return self.refs.keys()

    def _satisfy_refs_for_key(self, key):
        try:
            del self.refs[key]
        except KeyError:
            # No keys depended on this key. That's ok.
            pass

    def add_key(self, key):
        # satisfy refs for key, and remember that we've seen this key.
        self._satisfy_refs_for_key(key)
        if self.new_keys is not None:
            self.new_keys.add(key)

    def satisfy_refs_for_keys(self, keys):
        for key in keys:
            self._satisfy_refs_for_key(key)

    def get_referrers(self):
        result = set()
        for referrers in self.refs.values():
            result.update(referrers)
        return result


class _KnitGraphIndex(object):
    """A KnitVersionedFiles index layered on GraphIndex."""

    def __init__(self, graph_index, is_locked, deltas=False, parents=True,
                 add_callback=None, track_external_parent_refs=False):
        """Construct a KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of breezy.index.GraphIndex.
        :param is_locked: A callback to check whether the object should answer
            queries.
        :param deltas: Allow delta-compressed records.


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param sizes: An iterable of tuples containing the key and size of each
            raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if not isinstance(raw_data, bytes):
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset + size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result

    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for a records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done here, so raise the error.
                    raise
                raise errors.RetryWithNewPacks(index,
                    reload_occurred=True,
                    exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                    reload_occurred=False,
                    exc_info=sys.exc_info())
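
    # Illustrative note (added commentary, not in the original class): the
    # memos returned by add_raw_records() are exactly what get_raw_records()
    # consumes, so a round trip looks roughly like
    #
    #   memos = access.add_raw_records([(key, len(raw))], raw)
    #   data = b''.join(access.get_raw_records(memos))
    #
    # with 'access' a _DirectPackAccess whose writer has been configured via
    # set_writer().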

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_value.with_traceback(exc_traceback)


# Deprecated, use PatienceSequenceMatcher instead
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher
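

# Illustrative sketch (not part of the original module): the intended calling
# pattern around RetryWithNewPacks, mirroring get_record_stream() and the
# annotator below. 'access' is assumed to be a _DirectPackAccess-like object
# and 'operation' a callable that may raise errors.RetryWithNewPacks.
def _example_retry_loop(access, operation):
    while True:
        try:
            return operation()
        except errors.RetryWithNewPacks as e:
            # Either reloads the pack names and lets us retry, or re-raises
            # the original error if nothing actually changed.
            access.reload_or_raise(e)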


def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                        self._extract_texts(records)):
                    if pb is not None:
                        pb.update(gettext('annotating'), idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text)  # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks as e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()

    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(
            delta, parent_lines, lines))
        self._matching_blocks[(key, compression_parent)] = blocks

    def _expand_record(self, key, parent_keys, compression_parent, record,