# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Knit versionedfile implementation.
19
A knit is a versioned file implementation that supports efficient append only
23
lifeless: the data file is made up of "delta records". each delta record has a delta header
24
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
25
the -expanded data- (ie, the delta applied to the parent). the delta also ends with a
26
end-marker; simply "end VERSION"
28
delta can be line or full contents.a
29
... the 8's there are the index number of the annotation.
30
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
34
8 e.set('executable', 'yes')
36
8 if elt.get('executable') == 'yes':
37
8 ie.executable = True
38
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad
42
09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
43
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
44
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
45
09:33 < lifeless> right
46
09:33 < jrydberg> lifeless: the position and size is the range in the data file
49
so the index sequence is the dictionary compressed sequence number used
50
in the deltas to provide line annotation
55
# 10:16 < lifeless> make partial index writes safe
56
# 10:16 < lifeless> implement 'knit.check()' like weave.check()
57
# 10:17 < lifeless> record known ghosts so we can detect when they are filled in rather than the current 'reweave
59
# move sha1 out of the content so that join is faster at verifying parents
60
# record content length ?
63
from cStringIO import StringIO
from itertools import izip, chain

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib.errors import (
    RevisionAlreadyPresent,
from bzrlib.osutils import (
from bzrlib.versionedfile import (
    AbsentContentFactory,
    ChunkedContentFactory,
# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located. we may need to
# bypass python file buffering.

DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
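
# Illustrative sketch only (not part of the knit API; the helper name is made
# up): the uncompressed body of a knit data record follows the framing
# described in the module docstring above -- a "version <id> <numlines>
# <digest>" header line, the content lines, then an "end <id>" trailer.
# A minimal reader for that framing could look like this:
def _example_split_knit_record(uncompressed_lines):
    """Return (version_id, digest, content_lines) for one record body."""
    header = uncompressed_lines[0].split()
    if len(header) != 4 or header[0] != 'version':
        raise ValueError('not a knit record header: %r' % (uncompressed_lines[0],))
    version_id, num_lines, digest = header[1], int(header[2]), header[3]
    if uncompressed_lines[-1] != 'end %s\n' % version_id:
        raise ValueError('missing end marker for %s' % version_id)
    content = uncompressed_lines[1:-1]
    if len(content) != num_lines:
        raise ValueError('expected %d content lines, got %d'
                         % (num_lines, len(content)))
    return version_id, digest, content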
class KnitAdapter(object):
137
"""Base class for knit record adaption."""
139
def __init__(self, basis_vf):
140
"""Create an adapter which accesses full texts from basis_vf.
142
:param basis_vf: A versioned file to access basis texts of deltas from.
143
May be None for adapters that do not need to access basis texts.
145
self._data = KnitVersionedFiles(None, None)
146
self._annotate_factory = KnitAnnotateFactory()
147
self._plain_factory = KnitPlainFactory()
148
self._basis_vf = basis_vf
151
class FTAnnotatedToUnannotated(KnitAdapter):
152
"""An adapter from FT annotated knits to unannotated ones."""
154
def get_bytes(self, factory, annotated_compressed_bytes):
156
self._data._parse_record_unchecked(annotated_compressed_bytes)
157
content = self._annotate_factory.parse_fulltext(contents, rec[1])
158
size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
162
class DeltaAnnotatedToUnannotated(KnitAdapter):
163
"""An adapter for deltas from annotated to unannotated."""
165
def get_bytes(self, factory, annotated_compressed_bytes):
167
self._data._parse_record_unchecked(annotated_compressed_bytes)
168
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
170
contents = self._plain_factory.lower_line_delta(delta)
171
size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
175
class FTAnnotatedToFullText(KnitAdapter):
176
"""An adapter from FT annotated knits to unannotated ones."""
178
def get_bytes(self, factory, annotated_compressed_bytes):
180
self._data._parse_record_unchecked(annotated_compressed_bytes)
181
content, delta = self._annotate_factory.parse_record(factory.key[-1],
182
contents, factory._build_details, None)
183
return ''.join(content.text())
186
class DeltaAnnotatedToFullText(KnitAdapter):
187
"""An adapter for deltas from annotated to unannotated."""
189
def get_bytes(self, factory, annotated_compressed_bytes):
191
self._data._parse_record_unchecked(annotated_compressed_bytes)
192
delta = self._annotate_factory.parse_line_delta(contents, rec[1],
194
compression_parent = factory.parents[0]
195
basis_entry = self._basis_vf.get_record_stream(
196
[compression_parent], 'unordered', True).next()
197
if basis_entry.storage_kind == 'absent':
198
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
199
basis_chunks = basis_entry.get_bytes_as('chunked')
200
basis_lines = osutils.chunks_to_lines(basis_chunks)
201
# Manually apply the delta because we have one annotated content and one plain.
203
basis_content = PlainKnitContent(basis_lines, compression_parent)
204
basis_content.apply_delta(delta, rec[1])
205
basis_content._should_strip_eol = factory._build_details[1]
206
return ''.join(basis_content.text())
209
class FTPlainToFullText(KnitAdapter):
210
"""An adapter from FT plain knits to unannotated ones."""
212
def get_bytes(self, factory, compressed_bytes):
214
self._data._parse_record_unchecked(compressed_bytes)
215
content, delta = self._plain_factory.parse_record(factory.key[-1],
216
contents, factory._build_details, None)
217
return ''.join(content.text())
220
class DeltaPlainToFullText(KnitAdapter):
221
"""An adapter for deltas from annotated to unannotated."""
223
def get_bytes(self, factory, compressed_bytes):
225
self._data._parse_record_unchecked(compressed_bytes)
226
delta = self._plain_factory.parse_line_delta(contents, rec[1])
227
compression_parent = factory.parents[0]
228
# XXX: string splitting overhead.
229
basis_entry = self._basis_vf.get_record_stream(
230
[compression_parent], 'unordered', True).next()
231
if basis_entry.storage_kind == 'absent':
232
raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
233
basis_chunks = basis_entry.get_bytes_as('chunked')
234
basis_lines = osutils.chunks_to_lines(basis_chunks)
235
basis_content = PlainKnitContent(basis_lines, compression_parent)
236
# Manually apply the delta because we have one annotated content and one plain.
238
content, _ = self._plain_factory.parse_record(rec[1], contents,
239
factory._build_details, basis_content)
240
return ''.join(content.text())
243
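# Illustrative note (hypothetical objects): each adapter above converts the raw
# bytes of a knit record from one storage kind towards a plainer one, given the
# ContentFactory describing that record. insert_record_stream() further down
# looks adapters up by (source_storage_kind, target_kind), roughly:
#
#   adapter = DeltaAnnotatedToUnannotated(basis_vf=None)
#   plain_delta_bytes = adapter.get_bytes(factory, annotated_compressed_bytes)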
class KnitContentFactory(ContentFactory):
244
"""Content factory for streaming from knits.
246
:seealso ContentFactory:
249
def __init__(self, key, parents, build_details, sha1, raw_record,
250
annotated, knit=None):
251
"""Create a KnitContentFactory for key.
254
:param parents: The parents.
255
:param build_details: The build details as returned from
257
:param sha1: The sha1 expected from the full text of this object.
258
:param raw_record: The bytes of the knit data from disk.
259
:param annotated: True if the raw data is annotated.
261
ContentFactory.__init__(self)
264
self.parents = parents
265
if build_details[0] == 'line-delta':
270
annotated_kind = 'annotated-'
273
self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
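# The resulting kinds are 'knit-ft-gz', 'knit-delta-gz',
# 'knit-annotated-ft-gz' and 'knit-annotated-delta-gz'.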
274
self._raw_record = raw_record
275
self._build_details = build_details
278
def get_bytes_as(self, storage_kind):
279
if storage_kind == self.storage_kind:
280
return self._raw_record
281
if self._knit is not None:
282
if storage_kind == 'chunked':
283
return self._knit.get_lines(self.key[0])
284
elif storage_kind == 'fulltext':
285
return self._knit.get_text(self.key[0])
286
raise errors.UnavailableRepresentation(self.key, storage_kind,
290
class KnitContent(object):
291
"""Content of a knit version to which deltas can be applied.
293
This is always stored in memory as a list of lines with \n at the end,
294
plus a flag saying if the final ending is really there or not, because that
295
corresponds to the on-disk knit representation.
299
self._should_strip_eol = False
301
def apply_delta(self, delta, new_version_id):
302
"""Apply delta to this object to become new_version_id."""
303
raise NotImplementedError(self.apply_delta)
305
def line_delta_iter(self, new_lines):
306
"""Generate line-based delta from this content to new_lines."""
307
new_texts = new_lines.text()
308
old_texts = self.text()
309
s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
310
for tag, i1, i2, j1, j2 in s.get_opcodes():
313
# ofrom, oto, length, data
314
yield i1, i2, j2 - j1, new_lines._lines[j1:j2]
316
def line_delta(self, new_lines):
317
return list(self.line_delta_iter(new_lines))
320
def get_line_delta_blocks(knit_delta, source, target):
321
"""Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
322
target_len = len(target)
325
for s_begin, s_end, t_len, new_text in knit_delta:
326
true_n = s_begin - s_pos
329
# knit deltas do not provide reliable info about whether the
330
# last line of a file matches, due to eol handling.
331
if source[s_pos + n -1] != target[t_pos + n -1]:
334
yield s_pos, t_pos, n
335
t_pos += t_len + true_n
337
n = target_len - t_pos
339
if source[s_pos + n -1] != target[t_pos + n -1]:
342
yield s_pos, t_pos, n
343
yield s_pos + (target_len - t_pos), target_len, 0
346
class AnnotatedKnitContent(KnitContent):
347
"""Annotated content."""
349
def __init__(self, lines):
350
KnitContent.__init__(self)
354
"""Return a list of (origin, text) for each content line."""
355
lines = self._lines[:]
356
if self._should_strip_eol:
357
origin, last_line = lines[-1]
358
lines[-1] = (origin, last_line.rstrip('\n'))
361
def apply_delta(self, delta, new_version_id):
362
"""Apply delta to this object to become new_version_id."""
365
for start, end, count, delta_lines in delta:
366
lines[offset+start:offset+end] = delta_lines
367
offset = offset + (start - end) + count
371
lines = [text for origin, text in self._lines]
372
except ValueError, e:
373
# most commonly (only?) caused by the internal form of the knit
374
# missing annotation information because of a bug - see thread
376
raise KnitCorrupt(self,
377
"line in annotated knit missing annotation information: %s"
379
if self._should_strip_eol:
380
lines[-1] = lines[-1].rstrip('\n')
384
return AnnotatedKnitContent(self._lines[:])
387
class PlainKnitContent(KnitContent):
388
"""Unannotated content.
390
When annotate[_iter] is called on this content, the same version is reported
391
for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent objects.
395
def __init__(self, lines, version_id):
396
KnitContent.__init__(self)
398
self._version_id = version_id
401
"""Return a list of (origin, text) for each content line."""
402
return [(self._version_id, line) for line in self._lines]
404
def apply_delta(self, delta, new_version_id):
405
"""Apply delta to this object to become new_version_id."""
408
for start, end, count, delta_lines in delta:
409
lines[offset+start:offset+end] = delta_lines
410
offset = offset + (start - end) + count
411
self._version_id = new_version_id
414
return PlainKnitContent(self._lines[:], self._version_id)
418
if self._should_strip_eol:
420
lines[-1] = lines[-1].rstrip('\n')
424
class _KnitFactory(object):
425
"""Base class for common Factory functions."""
427
def parse_record(self, version_id, record, record_details,
428
base_content, copy_base_content=True):
429
"""Parse a record into a full content object.
431
:param version_id: The official version id for this content
432
:param record: The data returned by read_records_iter()
433
:param record_details: Details about the record returned by
435
:param base_content: If get_build_details returns a compression_parent,
436
you must return a base_content here, else use None
437
:param copy_base_content: When building from the base_content, decide
438
you can either copy it and return a new object, or modify it in
440
:return: (content, delta) A Content object and possibly a line-delta,
443
method, noeol = record_details
444
if method == 'line-delta':
445
if copy_base_content:
446
content = base_content.copy()
448
content = base_content
449
delta = self.parse_line_delta(record, version_id)
450
content.apply_delta(delta, version_id)
452
content = self.parse_fulltext(record, version_id)
454
content._should_strip_eol = noeol
455
return (content, delta)
458
class KnitAnnotateFactory(_KnitFactory):
459
"""Factory for creating annotated Content objects."""
463
def make(self, lines, version_id):
464
num_lines = len(lines)
465
return AnnotatedKnitContent(zip([version_id] * num_lines, lines))
467
def parse_fulltext(self, content, version_id):
468
"""Convert fulltext to internal representation
470
fulltext content is of the format
471
revid(utf8) plaintext\n
472
internal representation is of the format:
475
# TODO: jam 20070209 The tests expect this to be returned as tuples,
476
# but the code itself doesn't really depend on that.
477
# Figure out a way to not require the overhead of turning the
478
# list back into tuples.
479
lines = [tuple(line.split(' ', 1)) for line in content]
480
return AnnotatedKnitContent(lines)
482
def parse_line_delta_iter(self, lines):
483
return iter(self.parse_line_delta(lines))
485
def parse_line_delta(self, lines, version_id, plain=False):
486
"""Convert a line based delta into internal representation.
488
line delta is in the form of:
489
intstart intend intcount
491
revid(utf8) newline\n
492
internal representation is
493
(start, end, count, [1..count tuples (revid, newline)])
495
:param plain: If True, the lines are returned as a plain
496
list without annotations, not as a list of (origin, content) tuples, i.e.
497
(start, end, count, [1..count newline])
504
def cache_and_return(line):
505
origin, text = line.split(' ', 1)
506
return cache.setdefault(origin, origin), text
508
# walk through the lines parsing.
509
# Note that the plain test is explicitly pulled out of the
510
# loop to minimise any performance impact
513
start, end, count = [int(n) for n in header.split(',')]
514
contents = [next().split(' ', 1)[1] for i in xrange(count)]
515
result.append((start, end, count, contents))
518
start, end, count = [int(n) for n in header.split(',')]
519
contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
520
result.append((start, end, count, contents))
523
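# For example, parse_line_delta turns the serialised hunk "1,2,1\nrev-9 new text\n"
# into (1, 2, 1, [('rev-9', 'new text\n')]), or into (1, 2, 1, ['new text\n'])
# when plain=True (the revision id 'rev-9' is just for illustration).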
def get_fulltext_content(self, lines):
524
"""Extract just the content lines from a fulltext."""
525
return (line.split(' ', 1)[1] for line in lines)
527
def get_linedelta_content(self, lines):
528
"""Extract just the content from a line delta.
530
This doesn't return all of the extra information stored in a delta.
531
Only the actual content lines.
536
header = header.split(',')
537
count = int(header[2])
538
for i in xrange(count):
539
origin, text = next().split(' ', 1)
542
def lower_fulltext(self, content):
543
"""convert a fulltext content record into a serializable form.
545
see parse_fulltext which this inverts.
547
# TODO: jam 20070209 We only do the caching thing to make sure that
548
# the origin is a valid utf-8 line, eventually we could remove it
549
return ['%s %s' % (o, t) for o, t in content._lines]
551
def lower_line_delta(self, delta):
552
"""convert a delta into a serializable form.
554
See parse_line_delta which this inverts.
556
# TODO: jam 20070209 We only do the caching thing to make sure that
557
# the origin is a valid utf-8 line, eventually we could remove it
559
for start, end, c, lines in delta:
560
out.append('%d,%d,%d\n' % (start, end, c))
561
out.extend(origin + ' ' + text
562
for origin, text in lines)
565
def annotate(self, knit, key):
566
content = knit._get_content(key)
567
# adjust for the fact that serialised annotations are only key suffixes
569
if type(key) == tuple:
571
origins = content.annotate()
573
for origin, line in origins:
574
result.append((prefix + (origin,), line))
577
# XXX: This smells a bit. Why would key ever be a non-tuple here?
578
# Aren't keys defined to be tuples? -- spiv 20080618
579
return content.annotate()
582
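# Illustrative note (hypothetical revision ids): the annotated factory keeps
# every line paired with the revision that introduced it, and
# lower_fulltext()/parse_fulltext() round-trip that through the serialised
# "origin text" form, e.g.
#
#   content = AnnotatedKnitContent([('rev-1', 'hello\n'), ('rev-2', 'world\n')])
#   KnitAnnotateFactory().lower_fulltext(content)
#   # -> ['rev-1 hello\n', 'rev-2 world\n']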
class KnitPlainFactory(_KnitFactory):
583
"""Factory for creating plain Content objects."""
587
def make(self, lines, version_id):
588
return PlainKnitContent(lines, version_id)
590
def parse_fulltext(self, content, version_id):
591
"""This parses an unannotated fulltext.
593
Note that this is not a noop - the internal representation
594
has (versionid, line) - it's just a constant versionid.
596
return self.make(content, version_id)
598
def parse_line_delta_iter(self, lines, version_id):
600
num_lines = len(lines)
601
while cur < num_lines:
604
start, end, c = [int(n) for n in header.split(',')]
605
yield start, end, c, lines[cur:cur+c]
608
def parse_line_delta(self, lines, version_id):
609
return list(self.parse_line_delta_iter(lines, version_id))
611
def get_fulltext_content(self, lines):
612
"""Extract just the content lines from a fulltext."""
615
def get_linedelta_content(self, lines):
616
"""Extract just the content from a line delta.
618
This doesn't return all of the extra information stored in a delta.
619
Only the actual content lines.
624
header = header.split(',')
625
count = int(header[2])
626
for i in xrange(count):
629
def lower_fulltext(self, content):
630
return content.text()
632
def lower_line_delta(self, delta):
634
for start, end, c, lines in delta:
635
out.append('%d,%d,%d\n' % (start, end, c))
639
def annotate(self, knit, key):
640
annotator = _KnitAnnotator(knit)
641
return annotator.annotate(key)
645
def make_file_factory(annotated, mapper):
646
"""Create a factory for creating a file based KnitVersionedFiles.
648
This is only functional enough to run interface tests, it doesn't try to
649
provide a full pack environment.
651
:param annotated: knit annotations are wanted.
652
:param mapper: The mapper from keys to paths.
654
def factory(transport):
655
index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
656
access = _KnitKeyAccess(transport, mapper)
657
return KnitVersionedFiles(index, access, annotated=annotated)
661
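# Illustrative usage sketch (the transport and mapper values are hypothetical):
#
#   factory = make_file_factory(annotated=True, mapper=ConstantMapper('knit'))
#   vf = factory(transport)
#   vf.add_lines(('rev-1',), (), ['a line of text\n'])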
def make_pack_factory(graph, delta, keylength):
662
"""Create a factory for creating a pack based VersionedFiles.
664
This is only functional enough to run interface tests, it doesn't try to
665
provide a full pack environment.
667
:param graph: Store a graph.
668
:param delta: Delta compress contents.
669
:param keylength: How long should keys be.
671
def factory(transport):
672
parents = graph or delta
678
max_delta_chain = 200
681
graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
682
key_elements=keylength)
683
stream = transport.open_write_stream('newpack')
684
writer = pack.ContainerWriter(stream.write)
686
index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
687
deltas=delta, add_callback=graph_index.add_nodes)
688
access = _DirectPackAccess({})
689
access.set_writer(writer, graph_index, (transport, 'newpack'))
690
result = KnitVersionedFiles(index, access,
691
max_delta_chain=max_delta_chain)
692
result.stream = stream
693
result.writer = writer
698
def cleanup_pack_knit(versioned_files):
699
versioned_files.stream.close()
700
versioned_files.writer.end()
703
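# Illustrative usage sketch: the pack factory above wires an InMemoryGraphIndex
# and a pack ContainerWriter together; the writer and its stream are expected
# to be shut down with cleanup_pack_knit() when the caller is done, roughly:
#
#   vf = make_pack_factory(graph=True, delta=True, keylength=1)(transport)
#   try:
#       vf.add_lines(('rev-1',), (), ['a line of text\n'])
#   finally:
#       cleanup_pack_knit(vf)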
class KnitVersionedFiles(VersionedFiles):
704
"""Storage for many versioned files using knit compression.
706
Backend storage is managed by indices and data objects.
708
:ivar _index: A _KnitGraphIndex or similar that can describe the
709
parents, graph, compression and data location of entries in this
710
KnitVersionedFiles. Note that this is only the index for
711
*this* vfs; if there are fallbacks they must be queried separately.
714
def __init__(self, index, data_access, max_delta_chain=200,
715
annotated=False, reload_func=None):
716
"""Create a KnitVersionedFiles with index and data_access.
718
:param index: The index for the knit data.
719
:param data_access: The access object to store and retrieve knit
721
:param max_delta_chain: The maximum number of deltas to permit during
722
insertion. Set to 0 to prohibit the use of deltas.
723
:param annotated: Set to True to cause annotations to be calculated and
724
stored during insertion.
725
:param reload_func: A function that can be called if we think we need
726
to reload the pack listing and try again. See
727
'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
730
self._access = data_access
731
self._max_delta_chain = max_delta_chain
733
self._factory = KnitAnnotateFactory()
735
self._factory = KnitPlainFactory()
736
self._fallback_vfs = []
737
self._reload_func = reload_func
740
return "%s(%r, %r)" % (
741
self.__class__.__name__,
745
def add_fallback_versioned_files(self, a_versioned_files):
746
"""Add a source of texts for texts not present in this knit.
748
:param a_versioned_files: A VersionedFiles object.
750
self._fallback_vfs.append(a_versioned_files)
752
def add_lines(self, key, parents, lines, parent_texts=None,
753
left_matching_blocks=None, nostore_sha=None, random_id=False,
755
"""See VersionedFiles.add_lines()."""
756
self._index._check_write_ok()
757
self._check_add(key, lines, random_id, check_content)
759
# The caller might pass None if there is no graph data, but kndx
760
# indexes can't directly store that, so we give them
761
# an empty tuple instead.
763
return self._add(key, lines, parents,
764
parent_texts, left_matching_blocks, nostore_sha, random_id)
766
def _add(self, key, lines, parents, parent_texts,
767
left_matching_blocks, nostore_sha, random_id):
768
"""Add a set of lines on top of version specified by parents.
770
Any versions not present will be converted into ghosts.
772
# first thing, if the content is something we don't need to store, find that out.
774
line_bytes = ''.join(lines)
775
digest = sha_string(line_bytes)
776
if nostore_sha == digest:
777
raise errors.ExistingContent
780
if parent_texts is None:
782
# Do a single query to ascertain parent presence; we only compress
783
# against parents in the same kvf.
784
present_parent_map = self._index.get_parent_map(parents)
785
for parent in parents:
786
if parent in present_parent_map:
787
present_parents.append(parent)
789
# Currently we can only compress against the left most present parent.
790
if (len(present_parents) == 0 or
791
present_parents[0] != parents[0]):
794
# To speed the extract of texts the delta chain is limited
795
# to a fixed number of deltas. This should minimize both
796
# I/O and the time spent applying deltas.
797
delta = self._check_should_delta(present_parents[0])
799
text_length = len(line_bytes)
802
if lines[-1][-1] != '\n':
803
# copy the contents of lines.
805
options.append('no-eol')
806
lines[-1] = lines[-1] + '\n'
810
if type(element) != str:
811
raise TypeError("key contains non-strings: %r" % (key,))
812
# Knit hunks are still last-element only
814
content = self._factory.make(lines, version_id)
815
if 'no-eol' in options:
816
# Hint to the content object that its text() call should strip the
818
content._should_strip_eol = True
819
if delta or (self._factory.annotated and len(present_parents) > 0):
820
# Merge annotations from parent texts if needed.
821
delta_hunks = self._merge_annotations(content, present_parents,
822
parent_texts, delta, self._factory.annotated,
823
left_matching_blocks)
826
options.append('line-delta')
827
store_lines = self._factory.lower_line_delta(delta_hunks)
828
size, bytes = self._record_to_data(key, digest,
831
options.append('fulltext')
832
# isinstance is slower and we have no hierarchy.
833
if self._factory.__class__ == KnitPlainFactory:
834
# Use the already joined bytes saving iteration time in
836
size, bytes = self._record_to_data(key, digest,
839
# get mixed annotation + content and feed it into the
841
store_lines = self._factory.lower_fulltext(content)
842
size, bytes = self._record_to_data(key, digest,
845
access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
846
self._index.add_records(
847
((key, options, access_memo, parents),),
849
return digest, text_length, content
851
def annotate(self, key):
852
"""See VersionedFiles.annotate."""
853
return self._factory.annotate(self, key)
855
def check(self, progress_bar=None):
856
"""See VersionedFiles.check()."""
857
# This doesn't actually test extraction of everything, but that will
858
# impact 'bzr check' substantially, and needs to be integrated with
859
# care. However, it does check for the obvious problem of a delta with
861
keys = self._index.keys()
862
parent_map = self.get_parent_map(keys)
864
if self._index.get_method(key) != 'fulltext':
865
compression_parent = parent_map[key][0]
866
if compression_parent not in parent_map:
867
raise errors.KnitCorrupt(self,
868
"Missing basis parent %s for %s" % (
869
compression_parent, key))
870
for fallback_vfs in self._fallback_vfs:
873
def _check_add(self, key, lines, random_id, check_content):
874
"""check that version_id and lines are safe to add."""
876
if contains_whitespace(version_id):
877
raise InvalidRevisionId(version_id, self)
878
self.check_not_reserved_id(version_id)
879
# TODO: If random_id==False and the key is already present, we should
880
# probably check that the existing content is identical to what is
881
# being inserted, and otherwise raise an exception. This would make
882
# the bundle code simpler.
884
self._check_lines_not_unicode(lines)
885
self._check_lines_are_lines(lines)
887
def _check_header(self, key, line):
888
rec = self._split_header(line)
889
self._check_header_version(rec, key[-1])
892
def _check_header_version(self, rec, version_id):
893
"""Checks the header version on original format knit records.
895
These have the last component of the key embedded in the record.
897
if rec[1] != version_id:
898
raise KnitCorrupt(self,
899
'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
901
def _check_should_delta(self, parent):
902
"""Iterate back through the parent listing, looking for a fulltext.
904
This is used when we want to decide whether to add a delta or a new
905
fulltext. It searches for _max_delta_chain parents. When it finds a
906
fulltext parent, it sees if the total size of the deltas leading up to
907
it is large enough to indicate that we want a new full text anyway.
909
Return True if we should create a new delta, False if we should use a full text.
914
for count in xrange(self._max_delta_chain):
916
# Note that this only looks in the index of this particular
917
# KnitVersionedFiles, not in the fallbacks. This ensures that
918
# we won't store a delta spanning physical repository
920
build_details = self._index.get_build_details([parent])
921
parent_details = build_details[parent]
922
except (RevisionNotPresent, KeyError):
923
# Some basis is not locally present: always fulltext
925
index_memo, compression_parent, _, _ = parent_details
926
_, _, size = index_memo
927
if compression_parent is None:
931
# We don't explicitly check for presence because this is in an
932
# inner loop, and if it's missing it'll fail anyhow.
933
parent = compression_parent
935
# We couldn't find a fulltext, so we must create a new one
937
# Simple heuristic - if the total I/O would be greater as a delta than
938
# the originally installed fulltext, we create a new fulltext.
939
return fulltext_size > delta_size
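# Worked example of the heuristic above (sizes are made up): with a chain of
# three deltas of 2KB, 3KB and 4KB sitting on a 6KB fulltext, delta_size is
# 9KB and fulltext_size is 6KB, so this returns False and a new fulltext is
# stored instead of a fourth delta.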
941
def _build_details_to_components(self, build_details):
942
"""Convert a build_details tuple to a position tuple."""
943
# record_details, access_memo, compression_parent
944
return build_details[3], build_details[0], build_details[1]
946
def _get_components_positions(self, keys, allow_missing=False):
947
"""Produce a map of position data for the components of keys.
949
This data is intended to be used for retrieving the knit records.
951
A dict of key to (record_details, index_memo, next, parents) is
953
method is the way referenced data should be applied.
954
index_memo is the handle to pass to the data access to actually get the
956
next is the build-parent of the version, or None for fulltexts.
957
parents is the version_ids of the parents of this version
959
:param allow_missing: If True do not raise an error on a missing component,
963
pending_components = keys
964
while pending_components:
965
build_details = self._index.get_build_details(pending_components)
966
current_components = set(pending_components)
967
pending_components = set()
968
for key, details in build_details.iteritems():
969
(index_memo, compression_parent, parents,
970
record_details) = details
971
method = record_details[0]
972
if compression_parent is not None:
973
pending_components.add(compression_parent)
974
component_data[key] = self._build_details_to_components(details)
975
missing = current_components.difference(build_details)
976
if missing and not allow_missing:
977
raise errors.RevisionNotPresent(missing.pop(), self)
978
return component_data
980
def _get_content(self, key, parent_texts={}):
981
"""Returns a content object that makes up the specified
983
cached_version = parent_texts.get(key, None)
984
if cached_version is not None:
985
# Ensure the cache dict is valid.
986
if not self.get_parent_map([key]):
987
raise RevisionNotPresent(key, self)
988
return cached_version
989
text_map, contents_map = self._get_content_maps([key])
990
return contents_map[key]
992
def _get_content_maps(self, keys, nonlocal_keys=None):
993
"""Produce maps of text and KnitContents
995
:param keys: The keys to produce content maps for.
996
:param nonlocal_keys: An iterable of keys (possibly intersecting keys)
997
which are known to not be in this knit, but rather in one of the
999
:return: (text_map, content_map) where text_map contains the texts for
1000
the requested versions and content_map contains the KnitContents.
1002
# FUTURE: This function could be improved for the 'extract many' case
1003
# by tracking each component and only doing the copy when the number of
1004
# children that need to apply deltas to it is > 1 or it is part of the
1007
multiple_versions = len(keys) != 1
1008
record_map = self._get_record_map(keys, allow_missing=True)
1013
if nonlocal_keys is None:
1014
nonlocal_keys = set()
1016
nonlocal_keys = frozenset(nonlocal_keys)
1017
missing_keys = set(nonlocal_keys)
1018
for source in self._fallback_vfs:
1019
if not missing_keys:
1021
for record in source.get_record_stream(missing_keys,
1023
if record.storage_kind == 'absent':
1025
missing_keys.remove(record.key)
1026
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
1027
text_map[record.key] = lines
1028
content_map[record.key] = PlainKnitContent(lines, record.key)
1029
if record.key in keys:
1030
final_content[record.key] = content_map[record.key]
1032
if key in nonlocal_keys:
1037
while cursor is not None:
1039
record, record_details, digest, next = record_map[cursor]
1041
raise RevisionNotPresent(cursor, self)
1042
components.append((cursor, record, record_details, digest))
1044
if cursor in content_map:
1045
# no need to plan further back
1046
components.append((cursor, None, None, None))
1050
for (component_id, record, record_details,
1051
digest) in reversed(components):
1052
if component_id in content_map:
1053
content = content_map[component_id]
1055
content, delta = self._factory.parse_record(key[-1],
1056
record, record_details, content,
1057
copy_base_content=multiple_versions)
1058
if multiple_versions:
1059
content_map[component_id] = content
1061
final_content[key] = content
1063
# digest here is the digest from the last applied component.
1064
text = content.text()
1065
actual_sha = sha_strings(text)
1066
if actual_sha != digest:
1067
raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
1068
text_map[key] = text
1069
return text_map, final_content
1071
def get_parent_map(self, keys):
1072
"""Get a map of the graph parents of keys.
1074
:param keys: The keys to look up parents for.
1075
:return: A mapping from keys to parents. Absent keys are absent from
1078
return self._get_parent_map_with_sources(keys)[0]
1080
def _get_parent_map_with_sources(self, keys):
1081
"""Get a map of the parents of keys.
1083
:param keys: The keys to look up parents for.
1084
:return: A tuple. The first element is a mapping from keys to parents.
1085
Absent keys are absent from the mapping. The second element is a
1086
list with the locations each key was found in. The first element
1087
is the in-this-knit parents, the second the first fallback source,
1091
sources = [self._index] + self._fallback_vfs
1094
for source in sources:
1097
new_result = source.get_parent_map(missing)
1098
source_results.append(new_result)
1099
result.update(new_result)
1100
missing.difference_update(set(new_result))
1101
return result, source_results
1103
def _get_record_map(self, keys, allow_missing=False):
1104
"""Produce a dictionary of knit records.
1106
:return: {key:(record, record_details, digest, next)}
1108
data returned from read_records
1110
opaque information to pass to parse_record
1112
SHA1 digest of the full text after all steps are done
1114
build-parent of the version, i.e. the leftmost ancestor.
1115
Will be None if the record is not a delta.
1116
:param keys: The keys to build a map for
1117
:param allow_missing: If some records are missing, rather than
1118
error, just return the data that could be generated.
1120
# This retries the whole request if anything fails. Potentially we
1121
# could be a bit more selective. We could track the keys whose records
1122
# we have successfully found, and then only request the new records
1123
# from there. However, _get_components_positions grabs the whole build
1124
# chain, which means we'll likely try to grab the same records again
1125
# anyway. Also, can the build chains change as part of a pack
1126
# operation? We wouldn't want to end up with a broken chain.
1129
position_map = self._get_components_positions(keys,
1130
allow_missing=allow_missing)
1131
# key = component_id, r = record_details, i_m = index_memo,
1133
records = [(key, i_m) for key, (r, i_m, n)
1134
in position_map.iteritems()]
1136
for key, record, digest in self._read_records_iter(records):
1137
(record_details, index_memo, next) = position_map[key]
1138
record_map[key] = record, record_details, digest, next
1140
except errors.RetryWithNewPacks, e:
1141
self._access.reload_or_raise(e)
1143
def _split_by_prefix(self, keys):
1144
"""For the given keys, split them up based on their prefix.
1146
To keep memory pressure somewhat under control, split the
1147
requests back into per-file-id requests, otherwise "bzr co"
1148
extracts the full tree into memory before writing it to disk.
1149
This should be revisited if _get_content_maps() can ever cross
1152
:param keys: An iterable of key tuples
1153
:return: A dict of {prefix: [key_list]}
1155
split_by_prefix = {}
1158
split_by_prefix.setdefault('', []).append(key)
1160
split_by_prefix.setdefault(key[0], []).append(key)
1161
return split_by_prefix
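# For example (hypothetical keys): [('f1', 'a'), ('f2', 'b'), ('f1', 'c')]
# splits into {'f1': [('f1', 'a'), ('f1', 'c')], 'f2': [('f2', 'b')]}.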
1163
def get_record_stream(self, keys, ordering, include_delta_closure):
1164
"""Get a stream of records for keys.
1166
:param keys: The keys to include.
1167
:param ordering: Either 'unordered' or 'topological'. A topologically
1168
sorted stream has compression parents strictly before their
1170
:param include_delta_closure: If True then the closure across any
1171
compression parents will be included (in the opaque data).
1172
:return: An iterator of ContentFactory objects, each of which is only
1173
valid until the iterator is advanced.
1175
# keys might be a generator
1179
if not self._index.has_graph:
1180
# Cannot topological order when no graph has been stored.
1181
ordering = 'unordered'
1183
remaining_keys = keys
1186
keys = set(remaining_keys)
1187
for content_factory in self._get_remaining_record_stream(keys,
1188
ordering, include_delta_closure):
1189
remaining_keys.discard(content_factory.key)
1190
yield content_factory
1192
except errors.RetryWithNewPacks, e:
1193
self._access.reload_or_raise(e)
1195
def _get_remaining_record_stream(self, keys, ordering,
1196
include_delta_closure):
1197
"""This function is the 'retry' portion for get_record_stream."""
1198
if include_delta_closure:
1199
positions = self._get_components_positions(keys, allow_missing=True)
1201
build_details = self._index.get_build_details(keys)
1203
# (record_details, access_memo, compression_parent_key)
1204
positions = dict((key, self._build_details_to_components(details))
1205
for key, details in build_details.iteritems())
1206
absent_keys = keys.difference(set(positions))
1207
# There may be more absent keys : if we're missing the basis component
1208
# and are trying to include the delta closure.
1209
if include_delta_closure:
1210
needed_from_fallback = set()
1211
# Build up reconstructable_keys dict. key:True in this dict means
1212
# the key can be reconstructed.
1213
reconstructable_keys = {}
1217
chain = [key, positions[key][2]]
1219
needed_from_fallback.add(key)
1222
while chain[-1] is not None:
1223
if chain[-1] in reconstructable_keys:
1224
result = reconstructable_keys[chain[-1]]
1228
chain.append(positions[chain[-1]][2])
1230
# missing basis component
1231
needed_from_fallback.add(chain[-1])
1234
for chain_key in chain[:-1]:
1235
reconstructable_keys[chain_key] = result
1237
needed_from_fallback.add(key)
1238
# Double index lookups here : need a unified api ?
1239
global_map, parent_maps = self._get_parent_map_with_sources(keys)
1240
if ordering == 'topological':
1241
# Global topological sort
1242
present_keys = tsort.topo_sort(global_map)
1243
# Now group by source:
1245
current_source = None
1246
for key in present_keys:
1247
for parent_map in parent_maps:
1248
if key in parent_map:
1249
key_source = parent_map
1251
if current_source is not key_source:
1252
source_keys.append((key_source, []))
1253
current_source = key_source
1254
source_keys[-1][1].append(key)
1256
if ordering != 'unordered':
1257
raise AssertionError('valid values for ordering are:'
1258
' "unordered" or "topological" not: %r'
1260
# Just group by source; remote sources first.
1263
for parent_map in reversed(parent_maps):
1264
source_keys.append((parent_map, []))
1265
for key in parent_map:
1266
present_keys.append(key)
1267
source_keys[-1][1].append(key)
1268
# We have been requested to return these records in an order that
1269
# suits us. So we ask the index to give us an optimally sorted
1271
for source, sub_keys in source_keys:
1272
if source is parent_maps[0]:
1273
# Only sort the keys for this VF
1274
self._index._sort_keys_by_io(sub_keys, positions)
1275
absent_keys = keys - set(global_map)
1276
for key in absent_keys:
1277
yield AbsentContentFactory(key)
1278
# restrict our view to the keys we can answer.
1279
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1280
# XXX: At that point we need to consider the impact of double reads by
1281
# utilising components multiple times.
1282
if include_delta_closure:
1283
# XXX: get_content_maps performs its own index queries; allow state
1285
non_local_keys = needed_from_fallback - absent_keys
1286
prefix_split_keys = self._split_by_prefix(present_keys)
1287
prefix_split_non_local_keys = self._split_by_prefix(non_local_keys)
1288
for prefix, keys in prefix_split_keys.iteritems():
1289
non_local = prefix_split_non_local_keys.get(prefix, [])
1290
non_local = set(non_local)
1291
text_map, _ = self._get_content_maps(keys, non_local)
1293
lines = text_map.pop(key)
1294
yield ChunkedContentFactory(key, global_map[key], None,
1297
for source, keys in source_keys:
1298
if source is parent_maps[0]:
1299
# this KnitVersionedFiles
1300
records = [(key, positions[key][1]) for key in keys]
1301
for key, raw_data, sha1 in self._read_records_iter_raw(records):
1302
(record_details, index_memo, _) = positions[key]
1303
yield KnitContentFactory(key, global_map[key],
1304
record_details, sha1, raw_data, self._factory.annotated, None)
1306
vf = self._fallback_vfs[parent_maps.index(source) - 1]
1307
for record in vf.get_record_stream(keys, ordering,
1308
include_delta_closure):
1311
def get_sha1s(self, keys):
1312
"""See VersionedFiles.get_sha1s()."""
1314
record_map = self._get_record_map(missing, allow_missing=True)
1316
for key, details in record_map.iteritems():
1317
if key not in missing:
1319
# record entry 2 is the 'digest'.
1320
result[key] = details[2]
1321
missing.difference_update(set(result))
1322
for source in self._fallback_vfs:
1325
new_result = source.get_sha1s(missing)
1326
result.update(new_result)
1327
missing.difference_update(set(new_result))
1330
def insert_record_stream(self, stream):
1331
"""Insert a record stream into this container.
1333
:param stream: A stream of records to insert.
1335
:seealso VersionedFiles.get_record_stream:
1337
def get_adapter(adapter_key):
1339
return adapters[adapter_key]
1341
adapter_factory = adapter_registry.get(adapter_key)
1342
adapter = adapter_factory(self)
1343
adapters[adapter_key] = adapter
1346
if self._factory.annotated:
1347
# self is annotated, we need annotated knits to use directly.
1348
annotated = "annotated-"
1351
# self is not annotated, but we can strip annotations cheaply.
1353
convertibles = set(["knit-annotated-ft-gz"])
1354
if self._max_delta_chain:
1355
delta_types.add("knit-annotated-delta-gz")
1356
convertibles.add("knit-annotated-delta-gz")
1357
# The set of types we can cheaply adapt without needing basis texts.
1358
native_types = set()
1359
if self._max_delta_chain:
1360
native_types.add("knit-%sdelta-gz" % annotated)
1361
delta_types.add("knit-%sdelta-gz" % annotated)
1362
native_types.add("knit-%sft-gz" % annotated)
1363
knit_types = native_types.union(convertibles)
1365
# Buffer all index entries that we can't add immediately because their
1366
# basis parent is missing. We don't buffer all because generating
1367
# annotations may require access to some of the new records. However we
1368
# can't generate annotations from new deltas until their basis parent
1369
# is present anyway, so we get away with not needing an index that
1370
# includes the new keys.
1372
# See <http://launchpad.net/bugs/300177> about ordering of compression
1373
# parents in the records - to be conservative, we insist that all
1374
# parents must be present to avoid expanding to a fulltext.
1376
# key = basis_parent, value = index entry to add
1377
buffered_index_entries = {}
1378
for record in stream:
1379
parents = record.parents
1380
# trace.mutter('inserting record %s (kind: %s, parents: %r)',
1381
# record.key, record.storage_kind, parents)
1382
if record.storage_kind in delta_types:
1383
# TODO: eventually the record itself should track
1384
# compression_parent
1385
compression_parent = parents[0]
1387
compression_parent = None
1388
# Raise an error when a record is missing.
1389
if record.storage_kind == 'absent':
1390
raise RevisionNotPresent([record.key], self)
1391
elif ((record.storage_kind in knit_types)
1392
and (compression_parent is None
1393
or not self._fallback_vfs
1394
or self._index.has_key(compression_parent)
1395
or not self.has_key(compression_parent))):
1396
# we can insert the knit record literally if either it has no
1397
# compression parent OR we already have its basis in this kvf
1398
# OR the basis is not present even in the fallbacks. In the
1399
# last case it will either turn up later in the stream and all
1400
# will be well, or it won't turn up at all and we'll raise an
1403
# TODO: self.has_key is somewhat redundant with
1404
# self._index.has_key; we really want something that directly
1405
# asks if it's only present in the fallbacks. -- mbp 20081119
1406
if record.storage_kind not in native_types:
1408
adapter_key = (record.storage_kind, "knit-delta-gz")
1409
adapter = get_adapter(adapter_key)
1411
adapter_key = (record.storage_kind, "knit-ft-gz")
1412
adapter = get_adapter(adapter_key)
1413
bytes = adapter.get_bytes(
1414
record, record.get_bytes_as(record.storage_kind))
1416
bytes = record.get_bytes_as(record.storage_kind)
1417
options = [record._build_details[0]]
1418
if record._build_details[1]:
1419
options.append('no-eol')
1420
# Just blat it across.
1421
# Note: This does end up adding data on duplicate keys. As
1422
# modern repositories use atomic insertions this should not
1423
# lead to excessive growth in the event of interrupted fetches.
1424
# 'knit' repositories may suffer excessive growth, but as a
1425
# deprecated format this is tolerable. It can be fixed if
1426
# needed by the kndx index support raising on a duplicate
1427
# add with identical parents and options.
1428
access_memo = self._access.add_raw_records(
1429
[(record.key, len(bytes))], bytes)[0]
1430
index_entry = (record.key, options, access_memo, parents)
1432
if 'fulltext' not in options:
1433
# Not a fulltext, so we need to make sure the compression
1434
# parent will also be present.
1435
# Note that pack backed knits don't need to buffer here
1436
# because they buffer all writes to the transaction level,
1437
# but we don't expose that difference at the index level. If
1438
# the query here has sufficient cost to show up in
1439
# profiling we should do that.
1441
# They're required to be physically in this
1442
# KnitVersionedFiles, not in a fallback.
1443
if not self._index.has_key(compression_parent):
1444
pending = buffered_index_entries.setdefault(
1445
compression_parent, [])
1446
pending.append(index_entry)
1449
self._index.add_records([index_entry])
1450
elif record.storage_kind == 'chunked':
1451
self.add_lines(record.key, parents,
1452
osutils.chunks_to_lines(record.get_bytes_as('chunked')))
1453
elif record.storage_kind == 'fulltext':
1454
self.add_lines(record.key, parents,
1455
split_lines(record.get_bytes_as('fulltext')))
1457
# Not a fulltext, and not suitable for direct insertion as a
1458
# delta, either because it's not the right format, or this
1459
# KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
1460
# 0) or because it depends on a base only present in the
1462
adapter_key = record.storage_kind, 'fulltext'
1463
adapter = get_adapter(adapter_key)
1464
lines = split_lines(adapter.get_bytes(
1465
record, record.get_bytes_as(record.storage_kind)))
1467
self.add_lines(record.key, parents, lines)
1468
except errors.RevisionAlreadyPresent:
1470
# Add any records whose basis parent is now available.
1471
added_keys = [record.key]
1473
key = added_keys.pop(0)
1474
if key in buffered_index_entries:
1475
index_entries = buffered_index_entries[key]
1476
self._index.add_records(index_entries)
1478
[index_entry[0] for index_entry in index_entries])
1479
del buffered_index_entries[key]
1480
# If there were any deltas which had a missing basis parent, error.
1481
if buffered_index_entries:
1482
from pprint import pformat
1483
raise errors.BzrCheckError(
1484
"record_stream refers to compression parents not in %r:\n%s"
1485
% (self, pformat(sorted(buffered_index_entries.keys()))))
1487
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1488
"""Iterate over the lines in the versioned files from keys.
1490
This may return lines from other keys. Each item the returned
1491
iterator yields is a tuple of a line and a text version that that line
1492
is present in (not introduced in).
1494
Ordering of results is in whatever order is most suitable for the
1495
underlying storage format.
1497
If a progress bar is supplied, it may be used to indicate progress.
1498
The caller is responsible for cleaning up progress bars (because this
1502
* Lines are normalised by the underlying store: they will all have \\n terminators.
1504
* Lines are returned in arbitrary order.
1505
* If a requested key did not change any lines (or didn't have any
1506
lines), it may not be mentioned at all in the result.
1508
:return: An iterator over (line, key).
1511
pb = progress.DummyProgress()
1517
# we don't care about inclusions, the caller cares.
1518
# but we need to setup a list of records to visit.
1519
# we need key, position, length
1521
build_details = self._index.get_build_details(keys)
1522
for key, details in build_details.iteritems():
1524
key_records.append((key, details[0]))
1525
records_iter = enumerate(self._read_records_iter(key_records))
1526
for (key_idx, (key, data, sha_value)) in records_iter:
1527
pb.update('Walking content.', key_idx, total)
1528
compression_parent = build_details[key][1]
1529
if compression_parent is None:
1531
line_iterator = self._factory.get_fulltext_content(data)
1534
line_iterator = self._factory.get_linedelta_content(data)
1535
# Now that we are yielding the data for this key, remove it
1538
# XXX: It might be more efficient to yield (key,
1539
# line_iterator) in the future. However for now, this is a
1540
# simpler change to integrate into the rest of the
1541
# codebase. RBC 20071110
1542
for line in line_iterator:
1545
except errors.RetryWithNewPacks, e:
1546
self._access.reload_or_raise(e)
1547
# If there are still keys we've not yet found, we look in the fallback
1548
# vfs, and hope to find them there. Note that if the keys are found
1549
# but had no changes or no content, the fallback may not return
1551
if keys and not self._fallback_vfs:
1552
# XXX: strictly the second parameter is meant to be the file id
1553
# but it's not easily accessible here.
1554
raise RevisionNotPresent(keys, repr(self))
1555
for source in self._fallback_vfs:
1559
for line, key in source.iter_lines_added_or_present_in_keys(keys):
1560
source_keys.add(key)
1562
keys.difference_update(source_keys)
1563
pb.update('Walking content.', total, total)
1565
def _make_line_delta(self, delta_seq, new_content):
1566
"""Generate a line delta from delta_seq and new_content."""
1568
for op in delta_seq.get_opcodes():
1569
if op[0] == 'equal':
1571
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
1574
def _merge_annotations(self, content, parents, parent_texts={},
1575
delta=None, annotated=None,
1576
left_matching_blocks=None):
1577
"""Merge annotations for content and generate deltas.
1579
This is done by comparing the annotations based on changes to the text
1580
and generating a delta on the resulting full texts. If annotations are
1581
not being created then a simple delta is created.
1583
if left_matching_blocks is not None:
1584
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1588
for parent_key in parents:
1589
merge_content = self._get_content(parent_key, parent_texts)
1590
if (parent_key == parents[0] and delta_seq is not None):
1593
seq = patiencediff.PatienceSequenceMatcher(
1594
None, merge_content.text(), content.text())
1595
for i, j, n in seq.get_matching_blocks():
1598
# this copies (origin, text) pairs across to the new
1599
# content for any line that matches the last-checked
1601
content._lines[j:j+n] = merge_content._lines[i:i+n]
1602
# XXX: Robert says the following block is a workaround for a
1603
# now-fixed bug and it can probably be deleted. -- mbp 20080618
1604
if content._lines and content._lines[-1][1][-1] != '\n':
1605
# The copied annotation was from a line without a trailing EOL,
1606
# reinstate one for the content object, to ensure correct
1608
line = content._lines[-1][1] + '\n'
1609
content._lines[-1] = (content._lines[-1][0], line)
1611
if delta_seq is None:
1612
reference_content = self._get_content(parents[0], parent_texts)
1613
new_texts = content.text()
1614
old_texts = reference_content.text()
1615
delta_seq = patiencediff.PatienceSequenceMatcher(
1616
None, old_texts, new_texts)
1617
return self._make_line_delta(delta_seq, content)
1619
def _parse_record(self, version_id, data):
1620
"""Parse an original format knit record.
1622
These have the last element of the key only present in the stored data.
1624
rec, record_contents = self._parse_record_unchecked(data)
1625
self._check_header_version(rec, version_id)
1626
return record_contents, rec[3]
1628
def _parse_record_header(self, key, raw_data):
1629
"""Parse a record header for consistency.
1631
:return: the header and the decompressor stream.
1632
as (stream, header_record)
1634
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
1637
rec = self._check_header(key, df.readline())
1638
except Exception, e:
1639
raise KnitCorrupt(self,
1640
"While reading {%s} got %s(%s)"
1641
% (key, e.__class__.__name__, str(e)))
1644
def _parse_record_unchecked(self, data):
1646
# 4168 calls in 2880 217 internal
1647
# 4168 calls to _parse_record_header in 2121
1648
# 4168 calls to readlines in 330
1649
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
1651
record_contents = df.readlines()
1652
except Exception, e:
1653
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
1654
(data, e.__class__.__name__, str(e)))
1655
header = record_contents.pop(0)
1656
rec = self._split_header(header)
1657
last_line = record_contents.pop()
1658
if len(record_contents) != int(rec[2]):
1659
raise KnitCorrupt(self,
1660
'incorrect number of lines %s != %s'
1661
' for version {%s} %s'
1662
% (len(record_contents), int(rec[2]),
1663
rec[1], record_contents))
1664
if last_line != 'end %s\n' % rec[1]:
1665
raise KnitCorrupt(self,
1666
'unexpected version end line %r, wanted %r'
1667
% (last_line, rec[1]))
1669
return rec, record_contents
1671
def _read_records_iter(self, records):
1672
"""Read text records from data file and yield result.
1674
The result will be returned in whatever is the fastest to read.
1675
Not by the order requested. Also, multiple requests for the same
1676
record will only yield 1 response.
1677
:param records: A list of (key, access_memo) entries
1678
:return: Yields (key, contents, digest) in the order
1679
read, not the order requested
1684
# XXX: This smells wrong, IO may not be getting ordered right.
1685
needed_records = sorted(set(records), key=operator.itemgetter(1))
1686
if not needed_records:
1689
# The transport optimizes the fetching as well
1690
# (ie, reads continuous ranges.)
1691
raw_data = self._access.get_raw_records(
1692
[index_memo for key, index_memo in needed_records])
1694
for (key, index_memo), data in \
1695
izip(iter(needed_records), raw_data):
1696
content, digest = self._parse_record(key[-1], data)
1697
yield key, content, digest
1699
def _read_records_iter_raw(self, records):
1700
"""Read text records from data file and yield raw data.
1702
This unpacks enough of the text record to validate the id is
1703
as expected but that's all.
1705
Each item the iterator yields is (key, bytes, sha1_of_full_text).
1707
# setup an iterator of the external records:
1708
# uses readv so nice and fast we hope.
1710
# grab the disk data needed.
1711
needed_offsets = [index_memo for key, index_memo
1713
raw_records = self._access.get_raw_records(needed_offsets)
1715
for key, index_memo in records:
1716
data = raw_records.next()
1717
# validate the header (note that we can only use the suffix in
1718
# current knit records).
1719
df, rec = self._parse_record_header(key, data)
1721
yield key, data, rec[3]
1723
def _record_to_data(self, key, digest, lines, dense_lines=None):
1724
"""Convert key, digest, lines into a raw data block.
1726
:param key: The key of the record. Currently keys are always serialised
1727
using just the trailing component.
1728
:param dense_lines: The bytes of lines but in a denser form. For
1729
instance, if lines is a list of 1000 bytestrings each ending in \n,
1730
dense_lines may be a list with one line in it, containing all the
1731
1000's lines and their \n's. Using dense_lines if it is already
1732
known is a win because the string join to create bytes in this
1733
function spends less time resizing the final string.
1734
:return: (len, a StringIO instance with the raw data ready to read.)
1736
# Note: using a string copy here increases memory pressure with e.g.
1737
# ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
1738
# when doing the initial commit of a mozilla tree. RBC 20070921
1739
bytes = ''.join(chain(
1740
["version %s %d %s\n" % (key[-1],
1743
dense_lines or lines,
1744
["end %s\n" % key[-1]]))
1745
if type(bytes) != str:
1746
raise AssertionError(
1747
'data must be plain bytes was %s' % type(bytes))
1748
if lines and lines[-1][-1] != '\n':
1749
raise ValueError('corrupt lines value %r' % lines)
1750
compressed_bytes = tuned_gzip.bytes_to_gzip(bytes)
1751
return len(compressed_bytes), compressed_bytes
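# A minimal standalone sketch of the framing-plus-gzip step performed by
# _record_to_data above, using the stdlib gzip module rather than bzrlib's
# tuned_gzip; the helper name and sample values are hypothetical.
import gzip
from cStringIO import StringIO as _DemoStringIO

def _demo_record_to_data(version_id, digest, lines):
    body = ''.join(
        ["version %s %d %s\n" % (version_id, len(lines), digest)]
        + lines
        + ["end %s\n" % version_id])
    sio = _DemoStringIO()
    gz = gzip.GzipFile(fileobj=sio, mode='wb')
    gz.write(body)
    gz.close()
    compressed_bytes = sio.getvalue()
    return len(compressed_bytes), compressed_bytes

# _demo_record_to_data('rev-1', '0123abcd', ['hello\n', 'world\n'])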
def _split_header(self, line):
1756
raise KnitCorrupt(self,
1757
'unexpected number of elements in record header')
1761
"""See VersionedFiles.keys."""
1762
if 'evil' in debug.debug_flags:
1763
trace.mutter_callsite(2, "keys scales with size of history")
1764
sources = [self._index] + self._fallback_vfs
1766
for source in sources:
1767
result.update(source.keys())
1771
class _KndxIndex(object):
"""Manages knit index files.

The index is kept in memory and read on startup, to enable
fast lookups of revision information. The cursor of the index
file is always pointing to the end, making it easy to append
entries.

_cache is a cache for fast mapping from version id to an Index
object.

_history is a cache for fast mapping from indexes to version ids.

The index data format is dictionary compressed when it comes to
parent references; an index entry may only have parents with a
lower index number. As a result, the index is topologically sorted.

Duplicate entries may be written to the index for a single version id;
if this is done, the latter one completely replaces the former:
this allows updates to correct version and parent information.
Note that the two entries may share the delta, and that successive
annotations and references MUST point to the first entry.

The index file on disk contains a header, followed by one line per knit
record. The same revision can be present in an index file more than once.
The first occurrence gets assigned a sequence number starting from 0.

The format of a single line is
REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
REVISION_ID is a utf8-encoded revision id
FLAGS is a comma separated list of flags about the record. Values include
no-eol, line-delta, fulltext.
BYTE_OFFSET is the ascii representation of the byte offset in the data file
that the compressed data starts at.
LENGTH is the ascii representation of the length of this record's data in
the data file.
PARENT_ID is a utf-8 revision id prefixed by a '.' that is a parent of
REVISION_ID.
PARENT_SEQUENCE_ID is the ascii representation of the sequence number of a
revision id already in the knit that is a parent of REVISION_ID.
The ' :' marker is the end-of-record marker.

When a write to the index file is interrupted, it will result in a line
that does not end in ' :'. If the ' :' is not present at the end of a line,
or at the end of the file, then the record that is missing it will be
ignored by the parser.

When writing new records to the index file, the data is preceded by '\n'
to ensure that records always start on new lines even if the last write was
interrupted. As a result it's normal for the last line in the index to be
missing a trailing newline. One can be added with no harmful effects.

:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
where prefix is e.g. the (fileid,) for .texts instances or () for
constant-mapped things like .revisions, and the old state is
tuple(history_vector, cache_dict). This is used to prevent having an
ABI change with the C extension that reads .kndx files.
"""
HEADER = "# bzr knit index 8\n"
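# A hypothetical example of one index line in the format documented above;
# the revision ids, offsets and helper name are made up. Parents already in
# the index are referred to by sequence number, absent ones by a
# '.'-prefixed fulltext id (see _dictionary_compress below).
def _demo_kndx_line(version_id, options, pos, size, parent_refs):
    return "\n%s %s %s %s %s :" % (
        version_id, ','.join(options), pos, size, ' '.join(parent_refs))

# _demo_kndx_line('rev-2', ['line-delta'], 120, 83, ['0', '.ghost-rev'])
# produces '\nrev-2 line-delta 120 83 0 .ghost-rev :'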
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
1833
"""Create a _KndxIndex on transport using mapper."""
1834
self._transport = transport
1835
self._mapper = mapper
1836
self._get_scope = get_scope
1837
self._allow_writes = allow_writes
1838
self._is_locked = is_locked
1840
self.has_graph = True
1842
def add_records(self, records, random_id=False):
1843
"""Add multiple records to the index.
1845
:param records: a list of tuples:
1846
(key, options, access_memo, parents).
1847
:param random_id: If True the ids being added were randomly generated
1848
and no check for existence will be performed.
1851
for record in records:
1854
path = self._mapper.map(key) + '.kndx'
1855
path_keys = paths.setdefault(path, (prefix, []))
1856
path_keys[1].append(record)
1857
for path in sorted(paths):
1858
prefix, path_keys = paths[path]
1859
self._load_prefixes([prefix])
1861
orig_history = self._kndx_cache[prefix][1][:]
1862
orig_cache = self._kndx_cache[prefix][0].copy()
1865
for key, options, (_, pos, size), parents in path_keys:
1867
# kndx indices cannot be parentless.
1869
line = "\n%s %s %s %s %s :" % (
1870
key[-1], ','.join(options), pos, size,
1871
self._dictionary_compress(parents))
1872
if type(line) != str:
1873
raise AssertionError(
1874
'data must be utf8 was %s' % type(line))
1876
self._cache_key(key, options, pos, size, parents)
1877
if len(orig_history):
1878
self._transport.append_bytes(path, ''.join(lines))
1880
self._init_index(path, lines)
1882
# If any problems happen, restore the original values and re-raise
1883
self._kndx_cache[prefix] = (orig_cache, orig_history)
1886
def _cache_key(self, key, options, pos, size, parent_keys):
1887
"""Cache a version record in the history array and index cache.
1889
This is inlined into _load_data for performance. KEEP IN SYNC.
1890
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
1894
version_id = key[-1]
1895
# last-element only for compatibility with the C load_data.
1896
parents = tuple(parent[-1] for parent in parent_keys)
1897
for parent in parent_keys:
1898
if parent[:-1] != prefix:
1899
raise ValueError("mismatched prefixes for %r, %r" % (
1901
cache, history = self._kndx_cache[prefix]
1902
# only want the _history index to reference the 1st index entry
1904
if version_id not in cache:
1905
index = len(history)
1906
history.append(version_id)
1908
index = cache[version_id][5]
1909
cache[version_id] = (version_id,
1916
def check_header(self, fp):
1917
line = fp.readline()
1919
# An empty file can actually be treated as though the file doesn't exist.
1921
raise errors.NoSuchFile(self)
1922
if line != self.HEADER:
1923
raise KnitHeaderError(badline=line, filename=self)
1925
def _check_read(self):
1926
if not self._is_locked():
1927
raise errors.ObjectNotLocked(self)
1928
if self._get_scope() != self._scope:
1931
def _check_write_ok(self):
1932
"""Assert if not writes are permitted."""
1933
if not self._is_locked():
1934
raise errors.ObjectNotLocked(self)
1935
if self._get_scope() != self._scope:
1937
if self._mode != 'w':
1938
raise errors.ReadOnlyObjectDirtiedError(self)
1940
def get_build_details(self, keys):
1941
"""Get the method, index_memo and compression parent for keys.
1943
Ghosts are omitted from the result.
1945
:param keys: An iterable of keys.
1946
:return: A dict of key:(index_memo, compression_parent, parents,
1949
opaque structure to pass to read_records to extract the raw
1952
Content that this record is built upon, may be None
1954
Logical parents of this node
1956
extra information about the content which needs to be passed to
1957
Factory.parse_record
1959
parent_map = self.get_parent_map(keys)
1962
if key not in parent_map:
1964
method = self.get_method(key)
1965
parents = parent_map[key]
1966
if method == 'fulltext':
1967
compression_parent = None
1969
compression_parent = parents[0]
1970
noeol = 'no-eol' in self.get_options(key)
1971
index_memo = self.get_position(key)
1972
result[key] = (index_memo, compression_parent,
1973
parents, (method, noeol))
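# An illustrative value for one key of the result mapping described above;
# all ids, offsets and sizes are hypothetical, and the index_memo shown uses
# the (key, pos, size) form returned by this index's get_position.
_example_build_details_entry = (
    (('rev-2',), 1234, 567),    # index_memo handed to read_records
    ('rev-1',),                 # compression parent, None for fulltexts
    (('rev-1',),),              # logical parents
    ('line-delta', False),      # record_details: (method, no-eol)
)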
def get_method(self, key):
1977
"""Return compression method of specified key."""
1978
options = self.get_options(key)
1979
if 'fulltext' in options:
1981
elif 'line-delta' in options:
1984
raise errors.KnitIndexUnknownMethod(self, options)
1986
def get_options(self, key):
1987
"""Return a list representing options.
1991
prefix, suffix = self._split_key(key)
1992
self._load_prefixes([prefix])
1994
return self._kndx_cache[prefix][0][suffix][1]
1996
raise RevisionNotPresent(key, self)
1998
def get_parent_map(self, keys):
1999
"""Get a map of the parents of keys.
2001
:param keys: The keys to look up parents for.
2002
:return: A mapping from keys to parents. Absent keys are absent from
2005
# Parse what we need to up front, this potentially trades off I/O
2006
# locality (.kndx and .knit in the same block group for the same file
2007
# id) for less checking in inner loops.
2008
prefixes = set(key[:-1] for key in keys)
2009
self._load_prefixes(prefixes)
2014
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
2018
result[key] = tuple(prefix + (suffix,) for
2019
suffix in suffix_parents)
2022
def get_position(self, key):
2023
"""Return details needed to access the version.
2025
:return: a tuple (key, data position, size) to hand to the access
2026
logic to get the record.
2028
prefix, suffix = self._split_key(key)
2029
self._load_prefixes([prefix])
2030
entry = self._kndx_cache[prefix][0][suffix]
2031
return key, entry[2], entry[3]
2033
has_key = _mod_index._has_key_from_parent_map
2035
def _init_index(self, path, extra_lines=[]):
2036
"""Initialize an index."""
2038
sio.write(self.HEADER)
2039
sio.writelines(extra_lines)
2041
self._transport.put_file_non_atomic(path, sio,
2042
create_parent_dir=True)
2043
# self._create_parent_dir)
2044
# mode=self._file_mode,
2045
# dir_mode=self._dir_mode)
2048
"""Get all the keys in the collection.
2050
The keys are not ordered.
2053
# Identify all key prefixes.
2054
# XXX: A bit hacky, needs polish.
2055
if type(self._mapper) == ConstantMapper:
2059
for quoted_relpath in self._transport.iter_files_recursive():
2060
path, ext = os.path.splitext(quoted_relpath)
2062
prefixes = [self._mapper.unmap(path) for path in relpaths]
2063
self._load_prefixes(prefixes)
2064
for prefix in prefixes:
2065
for suffix in self._kndx_cache[prefix][1]:
2066
result.add(prefix + (suffix,))
2069
def _load_prefixes(self, prefixes):
2070
"""Load the indices for prefixes."""
2072
for prefix in prefixes:
2073
if prefix not in self._kndx_cache:
2074
# the load_data interface writes to these variables.
2077
self._filename = prefix
2079
path = self._mapper.map(prefix) + '.kndx'
2080
fp = self._transport.get(path)
2082
# _load_data may raise NoSuchFile if the target knit is
2084
_load_data(self, fp)
2087
self._kndx_cache[prefix] = (self._cache, self._history)
2092
self._kndx_cache[prefix] = ({}, [])
2093
if type(self._mapper) == ConstantMapper:
2094
# preserve behaviour for revisions.kndx etc.
2095
self._init_index(path)
2100
missing_keys = _mod_index._missing_keys_from_parent_map
2102
def _partition_keys(self, keys):
2103
"""Turn keys into a dict of prefix:suffix_list."""
2106
prefix_keys = result.setdefault(key[:-1], [])
2107
prefix_keys.append(key[-1])
2110
def _dictionary_compress(self, keys):
2111
"""Dictionary compress keys.
2113
:param keys: The keys to generate references to.
:return: A string representation of keys. Keys which are present are
    dictionary compressed, and others are emitted as fulltext with a
    '.' prefix.
"""
prefix = keys[0][:-1]
2122
cache = self._kndx_cache[prefix][0]
2124
if key[:-1] != prefix:
2125
# kndx indices cannot refer across partitioned storage.
2126
raise ValueError("mismatched prefixes for %r" % keys)
2127
if key[-1] in cache:
2128
# -- inlined lookup() --
2129
result_list.append(str(cache[key[-1]][5]))
2130
# -- end lookup () --
2132
result_list.append('.' + key[-1])
2133
return ' '.join(result_list)
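# A standalone sketch of the parent compression performed above, with
# hypothetical ids; sequence_numbers stands in for the cached index entries.
def _demo_dictionary_compress(parent_keys, sequence_numbers):
    result_list = []
    for key in parent_keys:
        version_id = key[-1]
        if version_id in sequence_numbers:
            # parent already indexed: refer to it by sequence number
            result_list.append(str(sequence_numbers[version_id]))
        else:
            # unknown parent: emit the '.'-prefixed fulltext reference
            result_list.append('.' + version_id)
    return ' '.join(result_list)

# _demo_dictionary_compress([('rev-1',), ('ghost-rev',)], {'rev-1': 0})
# returns '0 .ghost-rev'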
def _reset_cache(self):
2136
# Possibly this should be a LRU cache. A dictionary from key_prefix to
2137
# (cache_dict, history_vector) for parsed kndx files.
2138
self._kndx_cache = {}
2139
self._scope = self._get_scope()
2140
allow_writes = self._allow_writes()
2146
def _sort_keys_by_io(self, keys, positions):
2147
"""Figure out an optimal order to read the records for the given keys.
2149
Sort keys, grouped by index and sorted by position.
2151
:param keys: A list of keys whose records we want to read. This will be
2153
:param positions: A dict, such as the one returned by
2154
_get_components_positions()
2157
def get_sort_key(key):
2158
index_memo = positions[key][1]
2159
# Group by prefix and position. index_memo[0] is the key, so it is
2160
# (file_id, revision_id) and we don't want to sort on revision_id,
2161
# index_memo[1] is the position, and index_memo[2] is the size,
2162
# which doesn't matter for the sort
2163
return index_memo[0][:-1], index_memo[1]
2164
return keys.sort(key=get_sort_key)
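# A worked example of the ordering produced above, using hypothetical keys
# and positions: records from the same prefix are grouped together, then
# ordered by their byte position within that prefix's .knit file.
_demo_keys = [('f1', 'r2'), ('f2', 'r1'), ('f1', 'r1')]
_demo_positions = {
    ('f1', 'r1'): (None, (('f1', 'r1'), 0, 10)),
    ('f1', 'r2'): (None, (('f1', 'r2'), 50, 10)),
    ('f2', 'r1'): (None, (('f2', 'r1'), 5, 10)),
}
_demo_keys.sort(key=lambda key: (_demo_positions[key][1][0][:-1],
                                 _demo_positions[key][1][1]))
# _demo_keys is now [('f1', 'r1'), ('f1', 'r2'), ('f2', 'r1')]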
def _split_key(self, key):
2167
"""Split key into a prefix and suffix."""
2168
return key[:-1], key[-1]
2171
class _KnitGraphIndex(object):
2172
"""A KnitVersionedFiles index layered on GraphIndex."""
2174
def __init__(self, graph_index, is_locked, deltas=False, parents=True,
2176
"""Construct a KnitGraphIndex on a graph_index.
2178
:param graph_index: An implementation of bzrlib.index.GraphIndex.
2179
:param is_locked: A callback to check whether the object should answer
2181
:param deltas: Allow delta-compressed records.
2182
:param parents: If True, record knits parents, if not do not record
2184
:param add_callback: If not None, allow additions to the index and call
2185
this callback with a list of added GraphIndex nodes:
2186
[(node, value, node_refs), ...]
2187
:param is_locked: A callback, returns True if the index is locked and
2190
self._add_callback = add_callback
2191
self._graph_index = graph_index
2192
self._deltas = deltas
2193
self._parents = parents
2194
if deltas and not parents:
2195
# XXX: TODO: Delta tree and parent graph should be conceptually
2197
raise KnitCorrupt(self, "Cannot do delta compression without "
2199
self.has_graph = parents
2200
self._is_locked = is_locked
2203
return "%s(%r)" % (self.__class__.__name__, self._graph_index)
2205
def add_records(self, records, random_id=False):
2206
"""Add multiple records to the index.
2208
This function does not insert data into the Immutable GraphIndex
2209
backing the KnitGraphIndex, instead it prepares data for insertion by
2210
the caller and checks that it is safe to insert, then calls
2211
self._add_callback with the prepared GraphIndex nodes.
2213
:param records: a list of tuples:
2214
(key, options, access_memo, parents).
2215
:param random_id: If True the ids being added were randomly generated
2216
and no check for existence will be performed.
2218
if not self._add_callback:
2219
raise errors.ReadOnlyError(self)
2220
# we hope there are no repositories with inconsistent parentage
2224
for (key, options, access_memo, parents) in records:
2226
parents = tuple(parents)
2227
index, pos, size = access_memo
2228
if 'no-eol' in options:
2232
value += "%d %d" % (pos, size)
2233
if not self._deltas:
2234
if 'line-delta' in options:
2235
raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
2238
if 'line-delta' in options:
2239
node_refs = (parents, (parents[0],))
2241
node_refs = (parents, ())
2243
node_refs = (parents, )
2246
raise KnitCorrupt(self, "attempt to add node with parents "
2247
"in parentless index.")
2249
keys[key] = (value, node_refs)
2252
present_nodes = self._get_entries(keys)
2253
for (index, key, value, node_refs) in present_nodes:
2254
if (value[0] != keys[key][0][0] or
2255
node_refs[:1] != keys[key][1][:1]):
2256
raise KnitCorrupt(self, "inconsistent details in add_records"
2257
": %s %s" % ((value, node_refs), keys[key]))
2261
for key, (value, node_refs) in keys.iteritems():
2262
result.append((key, value, node_refs))
2264
for key, (value, node_refs) in keys.iteritems():
2265
result.append((key, value))
2266
self._add_callback(result)
2268
def _check_read(self):
2269
"""raise if reads are not permitted."""
2270
if not self._is_locked():
2271
raise errors.ObjectNotLocked(self)
2273
def _check_write_ok(self):
2274
"""Assert if writes are not permitted."""
2275
if not self._is_locked():
2276
raise errors.ObjectNotLocked(self)
2278
def _compression_parent(self, an_entry):
2279
# return the key that an_entry is compressed against, or None
2280
# Grab the second parent list (as deltas implies parents currently)
2281
compression_parents = an_entry[3][1]
2282
if not compression_parents:
2284
if len(compression_parents) != 1:
2285
raise AssertionError(
2286
"Too many compression parents: %r" % compression_parents)
2287
return compression_parents[0]
2289
def get_build_details(self, keys):
2290
"""Get the method, index_memo and compression parent for version_ids.
2292
Ghosts are omitted from the result.
2294
:param keys: An iterable of keys.
2295
:return: A dict of key:
2296
(index_memo, compression_parent, parents, record_details).
2298
opaque structure to pass to read_records to extract the raw
2301
Content that this record is built upon, may be None
2303
Logical parents of this node
2305
extra information about the content which needs to be passed to
2306
Factory.parse_record
2310
entries = self._get_entries(keys, False)
2311
for entry in entries:
2313
if not self._parents:
2316
parents = entry[3][0]
2317
if not self._deltas:
2318
compression_parent_key = None
2320
compression_parent_key = self._compression_parent(entry)
2321
noeol = (entry[2][0] == 'N')
2322
if compression_parent_key:
2323
method = 'line-delta'
2326
result[key] = (self._node_to_position(entry),
2327
compression_parent_key, parents,
2331
def _get_entries(self, keys, check_present=False):
2332
"""Get the entries for keys.
2334
:param keys: An iterable of index key tuples.
2339
for node in self._graph_index.iter_entries(keys):
2341
found_keys.add(node[1])
2343
# adapt parentless index to the rest of the code.
2344
for node in self._graph_index.iter_entries(keys):
2345
yield node[0], node[1], node[2], ()
2346
found_keys.add(node[1])
2348
missing_keys = keys.difference(found_keys)
2350
raise RevisionNotPresent(missing_keys.pop(), self)
2352
def get_method(self, key):
2353
"""Return compression method of specified key."""
2354
return self._get_method(self._get_node(key))
2356
def _get_method(self, node):
2357
if not self._deltas:
2359
if self._compression_parent(node):
2364
def _get_node(self, key):
2366
return list(self._get_entries([key]))[0]
2368
raise RevisionNotPresent(key, self)
2370
def get_options(self, key):
2371
"""Return a list representing options.
2375
node = self._get_node(key)
2376
options = [self._get_method(node)]
2377
if node[2][0] == 'N':
2378
options.append('no-eol')
2381
def get_parent_map(self, keys):
2382
"""Get a map of the parents of keys.
2384
:param keys: The keys to look up parents for.
2385
:return: A mapping from keys to parents. Absent keys are absent from
2389
nodes = self._get_entries(keys)
2393
result[node[1]] = node[3][0]
2396
result[node[1]] = None
2399
def get_position(self, key):
2400
"""Return details needed to access the version.
2402
:return: a tuple (index, data position, size) to hand to the access
2403
logic to get the record.
2405
node = self._get_node(key)
2406
return self._node_to_position(node)
2408
has_key = _mod_index._has_key_from_parent_map
2411
"""Get all the keys in the collection.
2413
The keys are not ordered.
2416
return [node[1] for node in self._graph_index.iter_all_entries()]
2418
missing_keys = _mod_index._missing_keys_from_parent_map
2420
def _node_to_position(self, node):
2421
"""Convert an index value to position details."""
2422
bits = node[2][1:].split(' ')
2423
return node[0], int(bits[0]), int(bits[1])
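# A hypothetical example of the stored node value decoded above: the first
# character is a flags field ('N' marks no-eol), followed by the byte offset
# and length of the record within its pack.
_demo_node_value = 'N1234 567'
_demo_bits = _demo_node_value[1:].split(' ')
# (int(_demo_bits[0]), int(_demo_bits[1])) == (1234, 567)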
def _sort_keys_by_io(self, keys, positions):
2426
"""Figure out an optimal order to read the records for the given keys.
2428
Sort keys, grouped by index and sorted by position.
2430
:param keys: A list of keys whose records we want to read. This will be
2432
:param positions: A dict, such as the one returned by
2433
_get_components_positions()
2436
def get_index_memo(key):
2437
# index_memo is at offset [1]. It is made up of (GraphIndex,
2438
# position, size). GI is an object, which will be unique for each
2439
# pack file. This causes us to group by pack file, then sort by
2440
# position. Size doesn't matter, but it isn't worth breaking up the
2442
return positions[key][1]
2443
return keys.sort(key=get_index_memo)
2446
class _KnitKeyAccess(object):
2447
"""Access to records in .knit files."""
2449
def __init__(self, transport, mapper):
2450
"""Create a _KnitKeyAccess with transport and mapper.
2452
:param transport: The transport the access object is rooted at.
2453
:param mapper: The mapper used to map keys to .knit files.
2455
self._transport = transport
2456
self._mapper = mapper
2458
def add_raw_records(self, key_sizes, raw_data):
2459
"""Add raw knit bytes to a storage area.
2461
The data is spooled to the container writer in one bytes-record per
2464
:param sizes: An iterable of tuples containing the key and size of each
2466
:param raw_data: A bytestring containing the data.
2467
:return: A list of memos to retrieve the record later. Each memo is an
2468
opaque index memo. For _KnitKeyAccess the memo is (key, pos,
2469
length), where the key is the record key.
2471
if type(raw_data) != str:
2472
raise AssertionError(
2473
'data must be plain bytes was %s' % type(raw_data))
2476
# TODO: This can be tuned for writing to sftp and other servers where
2477
# append() is relatively expensive by grouping the writes to each key
2479
for key, size in key_sizes:
2480
path = self._mapper.map(key)
2482
base = self._transport.append_bytes(path + '.knit',
2483
raw_data[offset:offset+size])
2484
except errors.NoSuchFile:
2485
self._transport.mkdir(osutils.dirname(path))
2486
base = self._transport.append_bytes(path + '.knit',
2487
raw_data[offset:offset+size])
2491
result.append((key, base, size))
2494
def get_raw_records(self, memos_for_retrieval):
2495
"""Get the raw bytes for a records.
2497
:param memos_for_retrieval: An iterable containing the access memo for
2498
retrieving the bytes.
2499
:return: An iterator over the bytes of the records.
2501
# first pass, group into same-index request to minimise readv's issued.
2503
current_prefix = None
2504
for (key, offset, length) in memos_for_retrieval:
2505
if current_prefix == key[:-1]:
2506
current_list.append((offset, length))
2508
if current_prefix is not None:
2509
request_lists.append((current_prefix, current_list))
2510
current_prefix = key[:-1]
2511
current_list = [(offset, length)]
2512
# handle the last entry
2513
if current_prefix is not None:
2514
request_lists.append((current_prefix, current_list))
2515
for prefix, read_vector in request_lists:
2516
path = self._mapper.map(prefix) + '.knit'
2517
for pos, data in self._transport.readv(path, read_vector):
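# A standalone sketch of the first, grouping pass used by get_raw_records
# above: consecutive memos that share a key prefix are folded into a single
# readv request. The keys and offsets below are hypothetical.
def _demo_group_memos(memos_for_retrieval):
    request_lists = []
    current_prefix = None
    current_list = []
    for key, offset, length in memos_for_retrieval:
        if current_prefix == key[:-1]:
            current_list.append((offset, length))
        else:
            if current_prefix is not None:
                request_lists.append((current_prefix, current_list))
            current_prefix = key[:-1]
            current_list = [(offset, length)]
    if current_prefix is not None:
        request_lists.append((current_prefix, current_list))
    return request_lists

# _demo_group_memos([(('f1', 'r1'), 0, 10), (('f1', 'r2'), 10, 20),
#                    (('f2', 'r1'), 0, 30)])
# returns [(('f1',), [(0, 10), (10, 20)]), (('f2',), [(0, 30)])]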
class _DirectPackAccess(object):
2522
"""Access to data in one or more packs with less translation."""
2524
def __init__(self, index_to_packs, reload_func=None):
2525
"""Create a _DirectPackAccess object.
2527
:param index_to_packs: A dict mapping index objects to the transport
2528
and file names for obtaining data.
2529
:param reload_func: A function to call if we determine that the pack
2530
files have moved and we need to reload our caches. See
2531
bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
2533
self._container_writer = None
2534
self._write_index = None
2535
self._indices = index_to_packs
2536
self._reload_func = reload_func
2538
def add_raw_records(self, key_sizes, raw_data):
2539
"""Add raw knit bytes to a storage area.
2541
The data is spooled to the container writer in one bytes-record per
2544
:param sizes: An iterable of tuples containing the key and size of each
2546
:param raw_data: A bytestring containing the data.
2547
:return: A list of memos to retrieve the record later. Each memo is an
2548
opaque index memo. For _DirectPackAccess the memo is (index, pos,
2549
length), where the index field is the write_index object supplied
2550
to the PackAccess object.
2552
if type(raw_data) != str:
2553
raise AssertionError(
2554
'data must be plain bytes was %s' % type(raw_data))
2557
for key, size in key_sizes:
2558
p_offset, p_length = self._container_writer.add_bytes_record(
2559
raw_data[offset:offset+size], [])
2561
result.append((self._write_index, p_offset, p_length))
2564
def get_raw_records(self, memos_for_retrieval):
2565
"""Get the raw bytes for a records.
2567
:param memos_for_retrieval: An iterable containing the (index, pos,
2568
length) memo for retrieving the bytes. The Pack access method
2569
looks up the pack to use for a given record in its index_to_pack
2571
:return: An iterator over the bytes of the records.
2573
# first pass, group into same-index requests
2575
current_index = None
2576
for (index, offset, length) in memos_for_retrieval:
2577
if current_index == index:
2578
current_list.append((offset, length))
2580
if current_index is not None:
2581
request_lists.append((current_index, current_list))
2582
current_index = index
2583
current_list = [(offset, length)]
2584
# handle the last entry
2585
if current_index is not None:
2586
request_lists.append((current_index, current_list))
2587
for index, offsets in request_lists:
2589
transport, path = self._indices[index]
2591
# A KeyError here indicates that someone has triggered an index
2592
# reload, and this index has gone missing, we need to start
2594
if self._reload_func is None:
2595
# If we don't have a _reload_func there is nothing that can
2598
raise errors.RetryWithNewPacks(index,
2599
reload_occurred=True,
2600
exc_info=sys.exc_info())
2602
reader = pack.make_readv_reader(transport, path, offsets)
2603
for names, read_func in reader.iter_records():
2604
yield read_func(None)
2605
except errors.NoSuchFile:
2606
# A NoSuchFile error indicates that a pack file has gone
2607
# missing on disk, we need to trigger a reload, and start over.
2608
if self._reload_func is None:
2610
raise errors.RetryWithNewPacks(transport.abspath(path),
2611
reload_occurred=False,
2612
exc_info=sys.exc_info())
2614
def set_writer(self, writer, index, transport_packname):
2615
"""Set a writer to use for adding data."""
2616
if index is not None:
2617
self._indices[index] = transport_packname
2618
self._container_writer = writer
2619
self._write_index = index
2621
def reload_or_raise(self, retry_exc):
2622
"""Try calling the reload function, or re-raise the original exception.
2624
This should be called after _DirectPackAccess raises a
2625
RetryWithNewPacks exception. This function will handle the common logic
2626
of determining when the error is fatal versus being temporary.
2627
It will also make sure that the original exception is raised, rather
2628
than the RetryWithNewPacks exception.
2630
If this function returns, then the calling function should retry
2631
whatever operation was being performed. Otherwise an exception will
2634
:param retry_exc: A RetryWithNewPacks exception.
2637
if self._reload_func is None:
2639
elif not self._reload_func():
2640
# The reload claimed that nothing changed
2641
if not retry_exc.reload_occurred:
2642
# If there wasn't an earlier reload, then we really were
2643
# expecting to find changes. We didn't find them, so this is a
2647
exc_class, exc_value, exc_traceback = retry_exc.exc_info
2648
raise exc_class, exc_value, exc_traceback
2651
# Deprecated, use PatienceSequenceMatcher instead
2652
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher
2655
def annotate_knit(knit, revision_id):
"""Annotate a knit with no cached annotations.

This implementation is for knits with no cached annotations.
It will work for knits with cached annotations, but this is not
recommended.
"""
annotator = _KnitAnnotator(knit)
2663
return iter(annotator.annotate(revision_id))
2666
class _KnitAnnotator(object):
2667
"""Build up the annotations for a text."""
2669
def __init__(self, knit):
2672
# Content objects, differs from fulltexts because of how final newlines
2673
# are treated by knits. the content objects here will always have a
2675
self._fulltext_contents = {}
2677
# Annotated lines of specific revisions
2678
self._annotated_lines = {}
2680
# Track the raw data for nodes that we could not process yet.
2681
# This maps the revision_id of the base to a list of children that will
2682
# be annotated from it.
2683
self._pending_children = {}
2685
# Nodes which cannot be extracted
2686
self._ghosts = set()
2688
# Track how many children this node has, so we know if we need to keep
2690
self._annotate_children = {}
2691
self._compression_children = {}
2693
self._all_build_details = {}
2694
# The children => parent revision_id graph
2695
self._revision_id_graph = {}
2697
self._heads_provider = None
2699
self._nodes_to_keep_annotations = set()
2700
self._generations_until_keep = 100
2702
def set_generations_until_keep(self, value):
2703
"""Set the number of generations before caching a node.
2705
Setting this to -1 will cache every merge node, setting this higher
2706
will cache fewer nodes.
2708
self._generations_until_keep = value
2710
def _add_fulltext_content(self, revision_id, content_obj):
2711
self._fulltext_contents[revision_id] = content_obj
2712
# TODO: jam 20080305 It might be good to check the sha1digest here
2713
return content_obj.text()
2715
def _check_parents(self, child, nodes_to_annotate):
2716
"""Check if all parents have been processed.
2718
:param child: A tuple of (rev_id, parents, raw_content)
2719
:param nodes_to_annotate: If child is ready, add it to
2720
nodes_to_annotate, otherwise put it back in self._pending_children
2722
for parent_id in child[1]:
2723
if (parent_id not in self._annotated_lines):
2724
# This parent is present, but another parent is missing
2725
self._pending_children.setdefault(parent_id,
2729
# This one is ready to be processed
2730
nodes_to_annotate.append(child)
2732
def _add_annotation(self, revision_id, fulltext, parent_ids,
2733
left_matching_blocks=None):
2734
"""Add an annotation entry.
2736
All parents should already have been annotated.
2737
:return: A list of children that now have their parents satisfied.
2739
a = self._annotated_lines
2740
annotated_parent_lines = [a[p] for p in parent_ids]
2741
annotated_lines = list(annotate.reannotate(annotated_parent_lines,
2742
fulltext, revision_id, left_matching_blocks,
2743
heads_provider=self._get_heads_provider()))
2744
self._annotated_lines[revision_id] = annotated_lines
2745
for p in parent_ids:
2746
ann_children = self._annotate_children[p]
2747
ann_children.remove(revision_id)
2748
if (not ann_children
2749
and p not in self._nodes_to_keep_annotations):
2750
del self._annotated_lines[p]
2751
del self._all_build_details[p]
2752
if p in self._fulltext_contents:
2753
del self._fulltext_contents[p]
2754
# Now that we've added this one, see if there are any pending
2755
# deltas to be done, certainly this parent is finished
2756
nodes_to_annotate = []
2757
for child in self._pending_children.pop(revision_id, []):
2758
self._check_parents(child, nodes_to_annotate)
2759
return nodes_to_annotate
2761
def _get_build_graph(self, key):
2762
"""Get the graphs for building texts and annotations.
2764
The data you need for creating a full text may be different than the
2765
data you need to annotate that text. (At a minimum, you need both
parents to create an annotation, but only need one parent to generate the
fulltext.)

:return: A list of (key, index_memo) records, suitable for
    passing to read_records_iter to start reading in the raw data from
    the pack file.
"""
if key in self._annotated_lines:
2776
pending = set([key])
2781
# get all pending nodes
2783
this_iteration = pending
2784
build_details = self._knit._index.get_build_details(this_iteration)
2785
self._all_build_details.update(build_details)
2786
# new_nodes = self._knit._index._get_entries(this_iteration)
2788
for key, details in build_details.iteritems():
2789
(index_memo, compression_parent, parents,
2790
record_details) = details
2791
self._revision_id_graph[key] = parents
2792
records.append((key, index_memo))
2793
# Do we actually need to check _annotated_lines?
2794
pending.update(p for p in parents
2795
if p not in self._all_build_details)
2796
if compression_parent:
2797
self._compression_children.setdefault(compression_parent,
2800
for parent in parents:
2801
self._annotate_children.setdefault(parent,
2803
num_gens = generation - kept_generation
2804
if ((num_gens >= self._generations_until_keep)
2805
and len(parents) > 1):
2806
kept_generation = generation
2807
self._nodes_to_keep_annotations.add(key)
2809
missing_versions = this_iteration.difference(build_details.keys())
2810
self._ghosts.update(missing_versions)
2811
for missing_version in missing_versions:
2812
# add a key, no parents
2813
self._revision_id_graph[missing_version] = ()
2814
pending.discard(missing_version) # don't look for it
2815
if self._ghosts.intersection(self._compression_children):
2817
"We cannot have nodes which have a ghost compression parent:\n"
2819
"compression children: %r"
2820
% (self._ghosts, self._compression_children))
2821
# Cleanout anything that depends on a ghost so that we don't wait for
2822
# the ghost to show up
2823
for node in self._ghosts:
2824
if node in self._annotate_children:
2825
# We won't be building this node
2826
del self._annotate_children[node]
2827
# Generally we will want to read the records in reverse order, because
2828
# we find the parent nodes after the children
2832
def _annotate_records(self, records):
2833
"""Build the annotations for the listed records."""
2834
# We iterate in the order read, rather than a strict order requested
2835
# However, process what we can, and put off to the side things that
2836
# still need parents, cleaning them up when those parents are
2838
for (rev_id, record,
2839
digest) in self._knit._read_records_iter(records):
2840
if rev_id in self._annotated_lines:
2842
parent_ids = self._revision_id_graph[rev_id]
2843
parent_ids = [p for p in parent_ids if p not in self._ghosts]
2844
details = self._all_build_details[rev_id]
2845
(index_memo, compression_parent, parents,
2846
record_details) = details
2847
nodes_to_annotate = []
2848
# TODO: Remove the punning between compression parents, and
2849
# parent_ids, we should be able to do this without assuming
2851
if len(parent_ids) == 0:
2852
# There are no parents for this node, so just add it
2853
# TODO: This probably needs to be decoupled
2854
fulltext_content, delta = self._knit._factory.parse_record(
2855
rev_id, record, record_details, None)
2856
fulltext = self._add_fulltext_content(rev_id, fulltext_content)
2857
nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
2858
parent_ids, left_matching_blocks=None))
2860
child = (rev_id, parent_ids, record)
2861
# Check if all the parents are present
2862
self._check_parents(child, nodes_to_annotate)
2863
while nodes_to_annotate:
2864
# Should we use a queue here instead of a stack?
2865
(rev_id, parent_ids, record) = nodes_to_annotate.pop()
2866
(index_memo, compression_parent, parents,
2867
record_details) = self._all_build_details[rev_id]
2869
if compression_parent is not None:
2870
comp_children = self._compression_children[compression_parent]
2871
if rev_id not in comp_children:
2872
raise AssertionError("%r not in compression children %r"
2873
% (rev_id, comp_children))
2874
# If there is only 1 child, it is safe to reuse this
2876
reuse_content = (len(comp_children) == 1
2877
and compression_parent not in
2878
self._nodes_to_keep_annotations)
2880
# Remove it from the cache since it will be changing
2881
parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
2882
# Make sure to copy the fulltext since it might be
2884
parent_fulltext = list(parent_fulltext_content.text())
2886
parent_fulltext_content = self._fulltext_contents[compression_parent]
2887
parent_fulltext = parent_fulltext_content.text()
2888
comp_children.remove(rev_id)
2889
fulltext_content, delta = self._knit._factory.parse_record(
2890
rev_id, record, record_details,
2891
parent_fulltext_content,
2892
copy_base_content=(not reuse_content))
2893
fulltext = self._add_fulltext_content(rev_id,
2895
if compression_parent == parent_ids[0]:
2896
# the compression_parent is the left parent, so we can
2898
blocks = KnitContent.get_line_delta_blocks(delta,
2899
parent_fulltext, fulltext)
2901
fulltext_content = self._knit._factory.parse_fulltext(
2903
fulltext = self._add_fulltext_content(rev_id,
2905
nodes_to_annotate.extend(
2906
self._add_annotation(rev_id, fulltext, parent_ids,
2907
left_matching_blocks=blocks))
2909
def _get_heads_provider(self):
2910
"""Create a heads provider for resolving ancestry issues."""
2911
if self._heads_provider is not None:
2912
return self._heads_provider
2913
parent_provider = _mod_graph.DictParentsProvider(
2914
self._revision_id_graph)
2915
graph_obj = _mod_graph.Graph(parent_provider)
2916
head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
2917
self._heads_provider = head_cache
2920
def annotate(self, key):
2921
"""Return the annotated fulltext at the given key.
2923
:param key: The key to annotate.
2925
if len(self._knit._fallback_vfs) > 0:
2926
# stacked knits can't use the fast path at present.
2927
return self._simple_annotate(key)
2930
records = self._get_build_graph(key)
2931
if key in self._ghosts:
2932
raise errors.RevisionNotPresent(key, self._knit)
2933
self._annotate_records(records)
2934
return self._annotated_lines[key]
2935
except errors.RetryWithNewPacks, e:
2936
self._knit._access.reload_or_raise(e)
2937
# The cached build_details are no longer valid
2938
self._all_build_details.clear()
2940
def _simple_annotate(self, key):
2941
"""Return annotated fulltext, rediffing from the full texts.
2943
This is slow but makes no assumptions about the repository
2944
being able to produce line deltas.
2946
# TODO: this code generates a parent maps of present ancestors; it
2947
# could be split out into a separate method, and probably should use
2948
# iter_ancestry instead. -- mbp and robertc 20080704
2949
graph = _mod_graph.Graph(self._knit)
2950
head_cache = _mod_graph.FrozenHeadsCache(graph)
2951
search = graph._make_breadth_first_searcher([key])
2955
present, ghosts = search.next_with_ghosts()
2956
except StopIteration:
2958
keys.update(present)
2959
parent_map = self._knit.get_parent_map(keys)
2961
reannotate = annotate.reannotate
2962
for record in self._knit.get_record_stream(keys, 'topological', True):
2964
fulltext = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
2965
parents = parent_map[key]
2966
if parents is not None:
2967
parent_lines = [parent_cache[parent] for parent in parent_map[key]]
2970
parent_cache[key] = list(
2971
reannotate(parent_lines, fulltext, key, None, head_cache))
2973
return parent_cache[key]
2975
raise errors.RevisionNotPresent(key, self._knit)
try:
    from bzrlib._knit_load_data_c import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data