# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
"""Knit versionedfile implementation.

A knit is a versioned file implementation that supports efficient append only
updates.

Knit file layout:
lifeless: the data file is made up of "delta records".  each delta record has a delta header
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
the -expanded data- (ie, the delta applied to the parent).  the delta also ends with an
end-marker; simply "end VERSION"

delta can be line or full contents.
... the 8's there are the index number of the annotation.
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
8 e.set('executable', 'yes')
8 if elt.get('executable') == 'yes':
8     ie.executable = True
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad

09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
09:33 < lifeless> right
09:33 < jrydberg> lifeless: the position and size is the range in the data file

so the index sequence is the dictionary compressed sequence number used
in the deltas to provide line annotation

"""
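# Illustrative sketch, not part of the original file: the uncompressed byte
# layout of one knit delta record, per the format described in the docstring
# above.  The version id, sha1 and lines are invented; on disk the whole
# record is additionally gzip-compressed.  The count field (3) is the number
# of lines between the "version" and "end" markers, and the sha1 is of the
# expanded fulltext, not of these delta lines.
_EXAMPLE_RAW_RECORD = (
    'version example-vid 3 0123456789abcdef0123456789abcdef01234567\n'
    '1,2,2\n'
    'example-vid first replacement line\n'
    'example-vid second replacement line\n'
    'end example-vid\n'
)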
# 10:16 < lifeless> make partial index writes safe
# 10:16 < lifeless> implement 'knit.check()' like weave.check()
# 10:17 < lifeless> record known ghosts so we can detect when they are filled in rather than the current 'reweave'
# move sha1 out of the content so that join is faster at verifying parents
# record content length ?
from cStringIO import StringIO
from itertools import izip, chain
import operator
import os

from zlib import Z_DEFAULT_COMPRESSION

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    annotate,
    debug,
    diff,
    index as _mod_index,
    pack,
    progress,
    trace,
    )
""")
from bzrlib import (
    errors,
    patiencediff,
    )
from bzrlib.errors import (
    InvalidRevisionId,
    KnitCorrupt,
    KnitHeaderError,
    RevisionNotPresent,
    RevisionAlreadyPresent,
    )
from bzrlib.graph import Graph
from bzrlib.osutils import (
    contains_whitespace,
    sha_string,
    sha_strings,
    split_lines,
    )
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile, bytes_to_gzip
from bzrlib.versionedfile import (
    AbsentContentFactory,
    adapter_registry,
    ConstantMapper,
    ContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )
# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located.  we may need to
# bypass python file buffering.

DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'
class KnitAdapter(object):
    """Base class for knit record adaptation."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = KnitVersionedFiles(None, None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf
class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
        return bytes
class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
        return bytes
class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to plain fulltexts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())
class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter for deltas from annotated knits to plain fulltexts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())
class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to plain fulltexts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())
class DeltaPlainToFullText(KnitAdapter):
    """An adapter for deltas from plain knits to plain fulltexts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())
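# Illustrative sketch, not part of the original file: how the adapters above
# line up with (source storage_kind, target representation) pairs.  The real
# wiring goes through bzrlib's adapter_registry; this literal map is an
# assumption for illustration only.
_EXAMPLE_ADAPTER_MAP = {
    ('knit-annotated-ft-gz', 'knit-ft-gz'): FTAnnotatedToUnannotated,
    ('knit-annotated-delta-gz', 'knit-delta-gz'): DeltaAnnotatedToUnannotated,
    ('knit-annotated-ft-gz', 'fulltext'): FTAnnotatedToFullText,
    ('knit-annotated-delta-gz', 'fulltext'): DeltaAnnotatedToFullText,
    ('knit-ft-gz', 'fulltext'): FTPlainToFullText,
    ('knit-delta-gz', 'fulltext'): DeltaPlainToFullText,
}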
class KnitContentFactory(ContentFactory):
    """Content factory for streaming from knits.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, build_details, sha1, raw_record,
        annotated, knit=None):
        """Create a KnitContentFactory for key.

        :param key: The key.
        :param parents: The parents.
        :param build_details: The build details as returned from
            get_build_details.
        :param sha1: The sha1 expected from the full text of this object.
        :param raw_record: The bytes of the knit data from disk.
        :param annotated: True if the raw data is annotated.
        """
        ContentFactory.__init__(self)
        self.sha1 = sha1
        self.key = key
        self.parents = parents
        if build_details[0] == 'line-delta':
            kind = 'delta'
        else:
            kind = 'ft'
        if annotated:
            annotated_kind = 'annotated-'
        else:
            annotated_kind = ''
        self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
        self._raw_record = raw_record
        self._build_details = build_details
        self._knit = knit

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            return self._raw_record
        if storage_kind == 'fulltext' and self._knit is not None:
            return self._knit.get_text(self.key[0])
        else:
            raise errors.UnavailableRepresentation(self.key, storage_kind,
                self.storage_kind)
class KnitContent(object):
    """Content of a knit version to which deltas can be applied.

    This is always stored in memory as a list of lines with \n at the end,
    plus a flag saying if the final ending is really there or not, because that
    corresponds to the on-disk knit representation.
    """

    def __init__(self):
        self._should_strip_eol = False

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        raise NotImplementedError(self.apply_delta)

    def line_delta_iter(self, new_lines):
        """Generate line-based delta from this content to new_lines."""
        new_texts = new_lines.text()
        old_texts = self.text()
        s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
        for tag, i1, i2, j1, j2 in s.get_opcodes():
            if tag == 'equal':
                continue
            # ofrom, oto, length, data
            yield i1, i2, j2 - j1, new_lines._lines[j1:j2]

    def line_delta(self, new_lines):
        return list(self.line_delta_iter(new_lines))
    @staticmethod
    def get_line_delta_blocks(knit_delta, source, target):
        """Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
        target_len = len(target)
        s_pos = 0
        t_pos = 0
        for s_begin, s_end, t_len, new_text in knit_delta:
            true_n = s_begin - s_pos
            n = true_n
            if n > 0:
                # knit deltas do not provide reliable info about whether the
                # last line of a file matches, due to eol handling.
                if source[s_pos + n - 1] != target[t_pos + n - 1]:
                    n -= 1
                if n > 0:
                    yield s_pos, t_pos, n
            t_pos += t_len + true_n
            s_pos = s_end
        n = target_len - t_pos
        if n > 0:
            if source[s_pos + n - 1] != target[t_pos + n - 1]:
                n -= 1
            if n > 0:
                yield s_pos, t_pos, n
        yield s_pos + (target_len - t_pos), target_len, 0
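def _example_line_delta_blocks():
    # Illustrative sketch, not part of the original API: a knit delta that
    # replaces source line [1,2) with one new line, and the matching blocks
    # recovered from it.  The result mirrors
    # SequenceMatcher.get_matching_blocks(), ending in the (len, len, 0)
    # sentinel.
    source = ['a\n', 'b\n', 'c\n']
    target = ['a\n', 'B\n', 'c\n']
    delta = [(1, 2, 1, ['B\n'])]
    blocks = list(KnitContent.get_line_delta_blocks(delta, source, target))
    # blocks == [(0, 0, 1), (2, 2, 1), (3, 3, 0)]
    return blocks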
class AnnotatedKnitContent(KnitContent):
    """Annotated content."""

    def __init__(self, lines):
        KnitContent.__init__(self)
        self._lines = lines

    def annotate(self):
        """Return a list of (origin, text) for each content line."""
        lines = self._lines[:]
        if self._should_strip_eol:
            origin, last_line = lines[-1]
            lines[-1] = (origin, last_line.rstrip('\n'))
        return lines

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        offset = 0
        lines = self._lines
        for start, end, count, delta_lines in delta:
            lines[offset+start:offset+end] = delta_lines
            offset = offset + (start - end) + count

    def text(self):
        try:
            lines = [text for origin, text in self._lines]
        except ValueError, e:
            # most commonly (only?) caused by the internal form of the knit
            # missing annotation information because of a bug - see thread
            # around 20071015
            raise KnitCorrupt(self,
                "line in annotated knit missing annotation information: %s"
                % (e,))
        if self._should_strip_eol:
            lines[-1] = lines[-1].rstrip('\n')
        return lines

    def copy(self):
        return AnnotatedKnitContent(self._lines[:])
class PlainKnitContent(KnitContent):
    """Unannotated content.

    When annotate[_iter] is called on this content, the same version is reported
    for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
    objects.
    """

    def __init__(self, lines, version_id):
        KnitContent.__init__(self)
        self._lines = lines
        self._version_id = version_id

    def annotate(self):
        """Return a list of (origin, text) for each content line."""
        return [(self._version_id, line) for line in self._lines]

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        offset = 0
        lines = self._lines
        for start, end, count, delta_lines in delta:
            lines[offset+start:offset+end] = delta_lines
            offset = offset + (start - end) + count
        self._version_id = new_version_id

    def copy(self):
        return PlainKnitContent(self._lines[:], self._version_id)

    def text(self):
        lines = self._lines
        if self._should_strip_eol:
            lines = lines[:]
            lines[-1] = lines[-1].rstrip('\n')
        return lines
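def _example_apply_delta():
    # Illustrative sketch, not part of the original API: applying a parsed
    # line delta to plain content.  The delta replaces lines [1,2) with two
    # new lines, so ['a\n', 'b\n', 'c\n'] becomes ['a\n', 'x\n', 'y\n', 'c\n'].
    content = PlainKnitContent(['a\n', 'b\n', 'c\n'], 'rev-a')
    content.apply_delta([(1, 2, 2, ['x\n', 'y\n'])], 'rev-b')
    return content.text()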
class _KnitFactory(object):
    """Base class for common Factory functions."""

    def parse_record(self, version_id, record, record_details,
        base_content, copy_base_content=True):
        """Parse a record into a full content object.

        :param version_id: The official version id for this content
        :param record: The data returned by read_records_iter()
        :param record_details: Details about the record returned by
            get_build_details
        :param base_content: If get_build_details returns a compression_parent,
            you must return a base_content here, else use None
        :param copy_base_content: When building from the base_content, decide
            you can either copy it and return a new object, or modify it in
            place.
        :return: (content, delta) A Content object and possibly a line-delta,
            delta may be None
        """
        method, noeol = record_details
        if method == 'line-delta':
            if copy_base_content:
                content = base_content.copy()
            else:
                content = base_content
            delta = self.parse_line_delta(record, version_id)
            content.apply_delta(delta, version_id)
        else:
            content = self.parse_fulltext(record, version_id)
            delta = None
        content._should_strip_eol = noeol
        return (content, delta)
class KnitAnnotateFactory(_KnitFactory):
    """Factory for creating annotated Content objects."""

    annotated = True

    def make(self, lines, version_id):
        num_lines = len(lines)
        return AnnotatedKnitContent(zip([version_id] * num_lines, lines))

    def parse_fulltext(self, content, version_id):
        """Convert fulltext to internal representation

        fulltext content is of the format
        revid(utf8) plaintext\n
        internal representation is of the format:
        (revid, plaintext)
        """
        # TODO: jam 20070209 The tests expect this to be returned as tuples,
        #       but the code itself doesn't really depend on that.
        #       Figure out a way to not require the overhead of turning the
        #       list back into tuples.
        lines = [tuple(line.split(' ', 1)) for line in content]
        return AnnotatedKnitContent(lines)
    def parse_line_delta_iter(self, lines):
        return iter(self.parse_line_delta(lines))

    def parse_line_delta(self, lines, version_id, plain=False):
        """Convert a line based delta into internal representation.

        line delta is in the form of:
        intstart intend intcount
        1..count lines:
        revid(utf8) newline\n
        internal representation is
        (start, end, count, [1..count tuples (revid, newline)])

        :param plain: If True, the lines are returned as a plain
            list without annotations, not as a list of (origin, content) tuples, i.e.
            (start, end, count, [1..count newline])
        """
        result = []
        lines = iter(lines)
        next = lines.next

        cache = {}
        def cache_and_return(line):
            origin, text = line.split(' ', 1)
            return cache.setdefault(origin, origin), text

        # walk through the lines parsing.
        # Note that the plain test is explicitly pulled out of the
        # loop to minimise any performance impact
        if plain:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [next().split(' ', 1)[1] for i in xrange(count)]
                result.append((start, end, count, contents))
        else:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
                result.append((start, end, count, contents))
        return result
    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return (line.split(' ', 1)[1] for line in lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        lines = iter(lines)
        next = lines.next
        for header in lines:
            header = header.split(',')
            count = int(header[2])
            for i in xrange(count):
                origin, text = next().split(' ', 1)
                yield text
    def lower_fulltext(self, content):
        """convert a fulltext content record into a serializable form.

        see parse_fulltext which this inverts.
        """
        # TODO: jam 20070209 We only do the caching thing to make sure that
        #       the origin is a valid utf-8 line, eventually we could remove it
        return ['%s %s' % (o, t) for o, t in content._lines]

    def lower_line_delta(self, delta):
        """convert a delta into a serializable form.

        See parse_line_delta which this inverts.
        """
        # TODO: jam 20070209 We only do the caching thing to make sure that
        #       the origin is a valid utf-8 line, eventually we could remove it
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(origin + ' ' + text
                       for origin, text in lines)
        return out
    def annotate(self, knit, key):
        content = knit._get_content(key)
        # adjust for the fact that serialised annotations are only key suffixes
        # for this factory.
        if type(key) == tuple:
            prefix = key[:-1]
            origins = content.annotate()
            result = []
            for origin, line in origins:
                result.append((prefix + (origin,), line))
            return result
        else:
            # XXX: This smells a bit. Why would key ever be a non-tuple here?
            # Aren't keys defined to be tuples? -- spiv 20080618
            return content.annotate()
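def _example_annotated_delta_round_trip():
    # Illustrative sketch, not part of the original API: round-tripping an
    # annotated line delta through KnitAnnotateFactory, using the serialized
    # form documented in parse_line_delta.  Revision ids and lines are
    # invented for illustration.
    factory = KnitAnnotateFactory()
    serialised = [
        '1,2,2\n',
        'rev-a replacement line one\n',
        'rev-b replacement line two\n',
    ]
    delta = factory.parse_line_delta(serialised, 'rev-b')
    # delta == [(1, 2, 2, [('rev-a', 'replacement line one\n'),
    #                      ('rev-b', 'replacement line two\n')])]
    return factory.lower_line_delta(delta) == serialised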
class KnitPlainFactory(_KnitFactory):
    """Factory for creating plain Content objects."""

    annotated = False

    def make(self, lines, version_id):
        return PlainKnitContent(lines, version_id)

    def parse_fulltext(self, content, version_id):
        """This parses an unannotated fulltext.

        Note that this is not a noop - the internal representation
        has (versionid, line) - it's just a constant versionid.
        """
        return self.make(content, version_id)

    def parse_line_delta_iter(self, lines, version_id):
        cur = 0
        num_lines = len(lines)
        while cur < num_lines:
            header = lines[cur]
            cur += 1
            start, end, c = [int(n) for n in header.split(',')]
            yield start, end, c, lines[cur:cur+c]
            cur += c

    def parse_line_delta(self, lines, version_id):
        return list(self.parse_line_delta_iter(lines, version_id))
    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return iter(lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        lines = iter(lines)
        next = lines.next
        for header in lines:
            header = header.split(',')
            count = int(header[2])
            for i in xrange(count):
                yield next()

    def lower_fulltext(self, content):
        return content.text()

    def lower_line_delta(self, delta):
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(lines)
        return out

    def annotate(self, knit, key):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate(key)
def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def factory(transport):
        index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
        access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(index, access, annotated=annotated)
    return factory
def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph or delta
        ref_length = 0
        if graph:
            ref_length += 1
        if delta:
            ref_length += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(index, access,
            max_delta_chain=max_delta_chain)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_knit(versioned_files):
    versioned_files.stream.close()
    versioned_files.writer.end()
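def _example_make_knit(transport):
    # Illustrative sketch, not part of the original API: building a
    # file-backed knit store with the factory above.  `transport` is assumed
    # to be a writable bzrlib transport, and 'example' is an invented name.
    factory = make_file_factory(annotated=True, mapper=ConstantMapper('example'))
    return factory(transport)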
class KnitVersionedFiles(VersionedFiles):
    """Storage for many versioned files using knit compression.

    Backend storage is managed by indices and data objects.
    """

    def __init__(self, index, data_access, max_delta_chain=200,
        annotated=False):
        """Create a KnitVersionedFiles with index and data_access.

        :param index: The index for the knit data.
        :param data_access: The access object to store and retrieve knit
            records.
        :param max_delta_chain: The maximum number of deltas to permit during
            insertion. Set to 0 to prohibit the use of deltas.
        :param annotated: Set to True to cause annotations to be calculated and
            stored during insertion.
        """
        self._index = index
        self._access = data_access
        self._max_delta_chain = max_delta_chain
        if annotated:
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        self._fallback_vfs = []

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._fallback_vfs.append(a_versioned_files)
    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """See VersionedFiles.add_lines()."""
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id)
    def _add(self, key, lines, parents, parent_texts,
        left_matching_blocks, nostore_sha, random_id):
        """Add a set of lines on top of version specified by parents.

        Any versions not present will be converted into ghosts.
        """
        # first thing, if the content is something we don't need to store, find
        # that out.
        line_bytes = ''.join(lines)
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent

        present_parents = []
        if parent_texts is None:
            parent_texts = {}
        # Do a single query to ascertain parent presence.
        present_parent_map = self.get_parent_map(parents)
        for parent in parents:
            if parent in present_parent_map:
                present_parents.append(parent)

        # Currently we can only compress against the left most present parent.
        if (len(present_parents) == 0 or
            present_parents[0] != parents[0]):
            delta = False
        else:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas. This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents[0])

        text_length = len(line_bytes)
        options = []
        if lines:
            if lines[-1][-1] != '\n':
                # copy the contents of lines.
                lines = lines[:]
                options.append('no-eol')
                lines[-1] = lines[-1] + '\n'
                line_bytes += '\n'

        for element in key:
            if type(element) != str:
                raise TypeError("key contains non-strings: %r" % (key,))
        # Knit hunks are still last-element only
        version_id = key[-1]
        content = self._factory.make(lines, version_id)
        if 'no-eol' in options:
            # Hint to the content object that its text() call should strip the
            # final newline.
            content._should_strip_eol = True
        if delta or (self._factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self._factory.annotated,
                left_matching_blocks)

        if delta:
            options.append('line-delta')
            store_lines = self._factory.lower_line_delta(delta_hunks)
            size, bytes = self._record_to_data(key, digest,
                store_lines)
        else:
            options.append('fulltext')
            # isinstance is slower and we have no hierarchy.
            if self._factory.__class__ == KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                size, bytes = self._record_to_data(key, digest,
                    lines, [line_bytes])
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self._factory.lower_fulltext(content)
                size, bytes = self._record_to_data(key, digest,
                    store_lines)

        access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
        self._index.add_records(
            ((key, options, access_memo, parents),),
            random_id=random_id)
        return digest, text_length, content
    def annotate(self, key):
        """See VersionedFiles.annotate."""
        return self._factory.annotate(self, key)

    def check(self, progress_bar=None):
        """See VersionedFiles.check()."""
        # This doesn't actually test extraction of everything, but that will
        # impact 'bzr check' substantially, and needs to be integrated with
        # care. However, it does check for the obvious problem of a delta with
        # no basis.
        keys = self.keys()
        parent_map = self.get_parent_map(keys)
        for key in keys:
            if self._index.get_method(key) != 'fulltext':
                compression_parent = parent_map[key][0]
                if compression_parent not in parent_map:
                    raise errors.KnitCorrupt(self,
                        "Missing basis parent %s for %s" % (
                        compression_parent, key))
    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self)
        self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception. This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)
    def _check_header(self, key, line):
        rec = self._split_header(line)
        self._check_header_version(rec, key[-1])
        return rec

    def _check_header_version(self, rec, version_id):
        """Checks the header version on original format knit records.

        These have the last component of the key embedded in the record.
        """
        if rec[1] != version_id:
            raise KnitCorrupt(self,
                'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
    def _check_should_delta(self, parent):
        """Iterate back through the parent listing, looking for a fulltext.

        This is used when we want to decide whether to add a delta or a new
        fulltext. It searches for _max_delta_chain parents. When it finds a
        fulltext parent, it sees if the total size of the deltas leading up to
        it is large enough to indicate that we want a new full text anyway.

        Return True if we should create a new delta, False if we should use a
        full text.
        """
        delta_size = 0
        fulltext_size = None
        for count in xrange(self._max_delta_chain):
            # XXX: Collapse these two queries:
            try:
                method = self._index.get_method(parent)
            except RevisionNotPresent:
                # Some basis is not locally present: always delta
                return False
            index, pos, size = self._index.get_position(parent)
            if method == 'fulltext':
                fulltext_size = size
                break
            delta_size += size
            # We don't explicitly check for presence because this is in an
            # inner loop, and if it's missing it'll fail anyhow.
            # TODO: This should be asking for compression parent, not graph
            # parent.
            parent = self._index.get_parent_map([parent])[parent][0]
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size
    def _build_details_to_components(self, build_details):
        """Convert a build_details tuple to a position tuple."""
        # record_details, access_memo, compression_parent
        return build_details[3], build_details[0], build_details[1]
    def _get_components_positions(self, keys, allow_missing=False):
        """Produce a map of position data for the components of keys.

        This data is intended to be used for retrieving the knit records.

        A dict of key to (record_details, index_memo, next, parents) is
        returned.
        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
            data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version

        :param allow_missing: If True do not raise an error on a missing component,
            just ignore it.
        """
        component_data = {}
        pending_components = keys
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[key] = self._build_details_to_components(details)
            missing = current_components.difference(build_details)
            if missing and not allow_missing:
                raise errors.RevisionNotPresent(missing.pop(), self)
        return component_data
    def _get_content(self, key, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(key, None)
        if cached_version is not None:
            # Ensure the cache dict is valid.
            if not self.get_parent_map([key]):
                raise RevisionNotPresent(key, self)
            return cached_version
        text_map, contents_map = self._get_content_maps([key])
        return contents_map[key]
    def _get_content_maps(self, keys, nonlocal_keys=None):
        """Produce maps of text and KnitContents

        :param keys: The keys to produce content maps for.
        :param nonlocal_keys: An iterable of keys (possibly intersecting keys)
            which are known to not be in this knit, but rather in one of the
            fallback knits.
        :return: (text_map, content_map) where text_map contains the texts for
            the requested versions and content_map contains the KnitContents.
        """
        # FUTURE: This function could be improved for the 'extract many' case
        # by tracking each component and only doing the copy when the number of
        # children that need to apply delta's to it is > 1 or it is part of the
        # final output.
        keys = list(keys)
        multiple_versions = len(keys) != 1
        record_map = self._get_record_map(keys, allow_missing=True)

        text_map = {}
        content_map = {}
        final_content = {}
        if nonlocal_keys is None:
            nonlocal_keys = set()
        else:
            nonlocal_keys = frozenset(nonlocal_keys)
        missing_keys = set(nonlocal_keys)
        for source in self._fallback_vfs:
            if not missing_keys:
                break
            for record in source.get_record_stream(missing_keys,
                'unordered', True):
                if record.storage_kind == 'absent':
                    continue
                missing_keys.remove(record.key)
                lines = split_lines(record.get_bytes_as('fulltext'))
                text_map[record.key] = lines
                content_map[record.key] = PlainKnitContent(lines, record.key)
                if record.key in keys:
                    final_content[record.key] = content_map[record.key]
        for key in keys:
            if key in nonlocal_keys:
                # already handled
                continue
            components = []
            cursor = key
            while cursor is not None:
                try:
                    record, record_details, digest, next = record_map[cursor]
                except KeyError:
                    raise RevisionNotPresent(cursor, self)
                components.append((cursor, record, record_details, digest))
                cursor = next
                if cursor in content_map:
                    # no need to plan further back
                    components.append((cursor, None, None, None))
                    break

            content = None
            for (component_id, record, record_details,
                 digest) in reversed(components):
                if component_id in content_map:
                    content = content_map[component_id]
                else:
                    content, delta = self._factory.parse_record(key[-1],
                        record, record_details, content,
                        copy_base_content=multiple_versions)
                    if multiple_versions:
                        content_map[component_id] = content

            final_content[key] = content

            # digest here is the digest from the last applied component.
            text = content.text()
            actual_sha = sha_strings(text)
            if actual_sha != digest:
                raise KnitCorrupt(self,
                    '\n  sha-1 %s'
                    '\n  of reconstructed text does not match'
                    '\n  expected %s'
                    '\n  for version %s' %
                    (actual_sha, digest, key))
            text_map[key] = text
        return text_map, final_content
    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map_with_sources(keys)[0]

    def _get_parent_map_with_sources(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results
    def _get_record_map(self, keys, allow_missing=False):
        """Produce a dictionary of knit records.

        :return: {key:(record, record_details, digest, next)}
            record
                data returned from read_records
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.
        :param keys: The keys to build a map for
        :param allow_missing: If some records are missing, rather than
            error, just return the data that could be generated.
        """
        position_map = self._get_components_positions(keys,
            allow_missing=allow_missing)
        # key = component_id, r = record_details, i_m = index_memo, n = next
        records = [(key, i_m) for key, (r, i_m, n)
                             in position_map.iteritems()]
        record_map = {}
        for key, record, digest in \
                self._read_records_iter(records):
            (record_details, index_memo, next) = position_map[key]
            record_map[key] = record, record_details, digest, next
        return record_map
    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        keys = set(keys)
        if not keys:
            return
        if not self._index.has_graph:
            # Cannot topological order when no graph has been stored.
            ordering = 'unordered'
        if include_delta_closure:
            positions = self._get_components_positions(keys, allow_missing=True)
        else:
            build_details = self._index.get_build_details(keys)
            # map from key to
            # (record_details, access_memo, compression_parent_key)
            positions = dict((key, self._build_details_to_components(details))
                for key, details in build_details.iteritems())
        absent_keys = keys.difference(set(positions))
        # There may be more absent keys : if we're missing the basis component
        # and are trying to include the delta closure.
        if include_delta_closure:
            needed_from_fallback = set()
            # Build up reconstructable_keys dict. key:True in this dict means
            # the key can be reconstructed.
            reconstructable_keys = {}
            for key in keys:
                # the delta chain
                try:
                    chain = [key, positions[key][2]]
                except KeyError:
                    needed_from_fallback.add(key)
                    continue
                result = True
                while chain[-1] is not None:
                    if chain[-1] in reconstructable_keys:
                        result = reconstructable_keys[chain[-1]]
                        break
                    else:
                        try:
                            chain.append(positions[chain[-1]][2])
                        except KeyError:
                            # missing basis component
                            needed_from_fallback.add(chain[-1])
                            result = True
                            break
                for chain_key in chain[:-1]:
                    reconstructable_keys[chain_key] = result
                if not result:
                    needed_from_fallback.add(key)
        # Double index lookups here : need a unified api ?
        global_map, parent_maps = self._get_parent_map_with_sources(keys)
        if ordering == 'topological':
            # Global topological sort
            present_keys = topo_sort(global_map)
            # Now group by source:
            source_keys = []
            current_source = None
            for key in present_keys:
                for parent_map in parent_maps:
                    if key in parent_map:
                        key_source = parent_map
                        break
                if current_source is not key_source:
                    source_keys.append((key_source, []))
                    current_source = key_source
                source_keys[-1][1].append(key)
        else:
            # Just group by source; remote sources first.
            present_keys = []
            source_keys = []
            for parent_map in reversed(parent_maps):
                source_keys.append((parent_map, []))
                for key in parent_map:
                    present_keys.append(key)
                    source_keys[-1][1].append(key)
        absent_keys = keys - set(global_map)
        for key in absent_keys:
            yield AbsentContentFactory(key)
        # restrict our view to the keys we can answer.
        # XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
        # XXX: At that point we need to consider the impact of double reads by
        # utilising components multiple times.
        if include_delta_closure:
            # XXX: get_content_maps performs its own index queries; allow state
            # to be passed in.
            text_map, _ = self._get_content_maps(present_keys,
                needed_from_fallback - absent_keys)
            for key in present_keys:
                yield FulltextContentFactory(key, global_map[key], None,
                    ''.join(text_map[key]))
        else:
            for source, keys in source_keys:
                if source is parent_maps[0]:
                    # this KnitVersionedFiles
                    records = [(key, positions[key][1]) for key in keys]
                    for key, raw_data, sha1 in self._read_records_iter_raw(records):
                        (record_details, index_memo, _) = positions[key]
                        yield KnitContentFactory(key, global_map[key],
                            record_details, sha1, raw_data, self._factory.annotated, None)
                else:
                    vf = self._fallback_vfs[parent_maps.index(source) - 1]
                    for record in vf.get_record_stream(keys, ordering,
                            include_delta_closure):
                        yield record
    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        missing = set(keys)
        record_map = self._get_record_map(missing, allow_missing=True)
        result = {}
        for key, details in record_map.iteritems():
            if key not in missing:
                continue
            # record entry 2 is the 'digest'.
            result[key] = details[2]
        missing.difference_update(set(result))
        for source in self._fallback_vfs:
            if not missing:
                break
            new_result = source.get_sha1s(missing)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result
    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        if self._factory.annotated:
            # self is annotated, we need annotated knits to use directly.
            annotated = "annotated-"
            convertibles = []
        else:
            # self is not annotated, but we can strip annotations cheaply.
            annotated = ""
            convertibles = set(["knit-annotated-ft-gz"])
            if self._max_delta_chain:
                convertibles.add("knit-annotated-delta-gz")
        # The set of types we can cheaply adapt without needing basis texts.
        native_types = set()
        if self._max_delta_chain:
            native_types.add("knit-%sdelta-gz" % annotated)
        native_types.add("knit-%sft-gz" % annotated)
        knit_types = native_types.union(convertibles)
        adapters = {}
        # Buffer all index entries that we can't add immediately because their
        # basis parent is missing. We don't buffer all because generating
        # annotations may require access to some of the new records. However we
        # can't generate annotations from new deltas until their basis parent
        # is present anyway, so we get away with not needing an index that
        # includes the new keys.
        # key = basis_parent, value = index entry to add
        buffered_index_entries = {}
        for record in stream:
            parents = record.parents
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key], self)
            if record.storage_kind in knit_types:
                if record.storage_kind not in native_types:
                    try:
                        adapter_key = (record.storage_kind, "knit-delta-gz")
                        adapter = get_adapter(adapter_key)
                    except KeyError:
                        adapter_key = (record.storage_kind, "knit-ft-gz")
                        adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(
                        record, record.get_bytes_as(record.storage_kind))
                else:
                    bytes = record.get_bytes_as(record.storage_kind)
                options = [record._build_details[0]]
                if record._build_details[1]:
                    options.append('no-eol')
                # Just blat it across.
                # Note: This does end up adding data on duplicate keys. As
                # modern repositories use atomic insertions this should not
                # lead to excessive growth in the event of interrupted fetches.
                # 'knit' repositories may suffer excessive growth, but as a
                # deprecated format this is tolerable. It can be fixed if
                # needed by making the kndx index support raising on a
                # duplicate add with identical parents and options.
                access_memo = self._access.add_raw_records(
                    [(record.key, len(bytes))], bytes)[0]
                index_entry = (record.key, options, access_memo, parents)
                buffered = False
                if 'fulltext' not in options:
                    basis_parent = parents[0]
                    # Note that pack backed knits don't need to buffer here
                    # because they buffer all writes to the transaction level,
                    # but we don't expose that difference at the index level. If
                    # the query here has sufficient cost to show up in
                    # profiling we should do that.
                    if basis_parent not in self.get_parent_map([basis_parent]):
                        pending = buffered_index_entries.setdefault(
                            basis_parent, [])
                        pending.append(index_entry)
                        buffered = True
                if not buffered:
                    self._index.add_records([index_entry])
            elif record.storage_kind == 'fulltext':
                self.add_lines(record.key, parents,
                    split_lines(record.get_bytes_as('fulltext')))
            else:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                lines = split_lines(adapter.get_bytes(
                    record, record.get_bytes_as(record.storage_kind)))
                try:
                    self.add_lines(record.key, parents, lines)
                except errors.RevisionAlreadyPresent:
                    pass
            # Add any records whose basis parent is now available.
            added_keys = [record.key]
            while added_keys:
                key = added_keys.pop(0)
                if key in buffered_index_entries:
                    index_entries = buffered_index_entries[key]
                    self._index.add_records(index_entries)
                    added_keys.extend(
                        [index_entry[0] for index_entry in index_entries])
                    del buffered_index_entries[key]
        # If there were any deltas which had a missing basis parent, error.
        if buffered_index_entries:
            raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
                self)
    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = progress.DummyProgress()
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need key, position, length
        key_records = []
        build_details = self._index.get_build_details(keys)
        for key, details in build_details.iteritems():
            if key in keys:
                key_records.append((key, details[0]))
                keys.remove(key)
        records_iter = enumerate(self._read_records_iter(key_records))
        for (key_idx, (key, data, sha_value)) in records_iter:
            pb.update('Walking content.', key_idx, total)
            compression_parent = build_details[key][1]
            if compression_parent is None:
                # fulltext
                line_iterator = self._factory.get_fulltext_content(data)
            else:
                # Delta
                line_iterator = self._factory.get_linedelta_content(data)
            # XXX: It might be more efficient to yield (key,
            # line_iterator) in the future. However for now, this is a simpler
            # change to integrate into the rest of the codebase. RBC 20071110
            for line in line_iterator:
                yield line, key
        for source in self._fallback_vfs:
            if not keys:
                break
            source_keys = set()
            for line, key in source.iter_lines_added_or_present_in_keys(keys):
                source_keys.add(key)
                yield line, key
            keys.difference_update(source_keys)
        if keys:
            raise RevisionNotPresent(keys, repr(self))
        pb.update('Walking content.', total, total)
    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks
    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None,
                           left_matching_blocks=None):
        """Merge annotations for content and generate deltas.

        This is done by comparing the annotations based on changes to the text
        and generating a delta on the resulting full texts. If annotations are
        not being created then a simple delta is created.
        """
        if left_matching_blocks is not None:
            delta_seq = diff._PrematchedMatcher(left_matching_blocks)
        else:
            delta_seq = None
        if annotated:
            for parent_key in parents:
                merge_content = self._get_content(parent_key, parent_texts)
                if (parent_key == parents[0] and delta_seq is not None):
                    seq = delta_seq
                else:
                    seq = patiencediff.PatienceSequenceMatcher(
                        None, merge_content.text(), content.text())
                for i, j, n in seq.get_matching_blocks():
                    if n == 0:
                        continue
                    # this copies (origin, text) pairs across to the new
                    # content for any line that matches the last-checked
                    # parent.
                    content._lines[j:j+n] = merge_content._lines[i:i+n]
            # XXX: Robert says the following block is a workaround for a
            # now-fixed bug and it can probably be deleted. -- mbp 20080618
            if content._lines and content._lines[-1][1][-1] != '\n':
                # The copied annotation was from a line without a trailing EOL,
                # reinstate one for the content object, to ensure correct
                # serialization.
                line = content._lines[-1][1] + '\n'
                content._lines[-1] = (content._lines[-1][0], line)
        if delta:
            if delta_seq is None:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = patiencediff.PatienceSequenceMatcher(
                    None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)
    def _parse_record(self, version_id, data):
        """Parse an original format knit record.

        These have the last element of the key only present in the stored data.
        """
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]
    def _parse_record_header(self, key, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
            as (stream, header_record)
        """
        df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(key, df.readline())
        except Exception, e:
            raise KnitCorrupt(self,
                "While reading {%s} got %s(%s)"
                % (key, e.__class__.__name__, str(e)))
        return df, rec
    def _parse_record_unchecked(self, data):
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self,
                'incorrect number of lines %s != %s'
                ' for version {%s} %s'
                % (len(record_contents), int(rec[2]),
                   rec[1], record_contents))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self,
                'unexpected version end line %r, wanted %r'
                % (last_line, rec[1]))
        df.close()
        return rec, record_contents
    def _read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.
        :param records: A list of (key, access_memo) entries
        :return: Yields (key, contents, digest) in the order
            read, not the order requested
        """
        if not records:
            return

        # XXX: This smells wrong, IO may not be getting ordered right.
        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for key, index_memo in needed_records])

        for (key, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(key[-1], data)
            yield key, content, digest
    def _read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected but that's all.

        Each item the iterator yields is (key, bytes, sha1_of_full_text).
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            needed_offsets = [index_memo for key, index_memo
                                           in records]
            raw_records = self._access.get_raw_records(needed_offsets)

        for key, index_memo in records:
            data = raw_records.next()
            # validate the header (note that we can only use the suffix in
            # current knit records).
            df, rec = self._parse_record_header(key, data)
            df.close()
            yield key, data, rec[3]
    def _record_to_data(self, key, digest, lines, dense_lines=None):
        """Convert key, digest, lines into a raw data block.

        :param key: The key of the record. Currently keys are always serialised
            using just the trailing component.
        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000's lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a StringIO instance with the raw data ready to read.)
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (key[-1],
                len(lines),
                digest)],
            dense_lines or lines,
            ["end %s\n" % key[-1]]))
        if type(bytes) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(bytes))
        if lines and lines[-1][-1] != '\n':
            raise ValueError('corrupt lines value %r' % lines)
        compressed_bytes = bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self,
                'unexpected number of elements in record header')
        return rec
    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result
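def _example_knit_round_trip(transport):
    # Illustrative sketch, not part of the original API: add one text to a
    # file-backed knit and read it back through the record stream.
    # `transport` is assumed to be a writable bzrlib transport and the
    # key/lines are invented.
    vf = make_file_factory(True, ConstantMapper('example'))(transport)
    key = ('rev-1',)
    vf.add_lines(key, (), ['hello\n', 'world\n'])
    record = vf.get_record_stream([key], 'unordered', True).next()
    return record.get_bytes_as('fulltext')   # 'hello\nworld\n'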
class _KndxIndex(object):
    """Manages knit index files

    The index is kept in memory and read on startup, to enable
    fast lookups of revision information.  The cursor of the index
    file is always pointing to the end, making it easy to append
    entries.

    _cache is a cache for fast mapping from version id to a Index
    object.

    _history is a cache for fast mapping from indexes to version ids.

    The index data format is dictionary compressed when it comes to
    parent references; an index entry may only have parents with a
    lower index number. As a result, the index is topologically sorted.

    Duplicate entries may be written to the index for a single version id
    if this is done then the latter one completely replaces the former:
    this allows updates to correct version and parent information.
    Note that the two entries may share the delta, and that successive
    annotations and references MUST point to the first entry.

    The index file on disc contains a header, followed by one line per knit
    record. The same revision can be present in an index file more than once.
    The first occurrence gets assigned a sequence number starting from 0.

    The format of a single line is
    REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
    REVISION_ID is a utf8-encoded revision id
    FLAGS is a comma separated list of flags about the record. Values include
        no-eol, line-delta, fulltext.
    BYTE_OFFSET is the ascii representation of the byte offset in the data file
        that the compressed data starts at.
    LENGTH is the ascii representation of the length of the record in the data
        file.
    PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
        REVISION_ID.
    PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
        revision id already in the knit that is a parent of REVISION_ID.
    The ' :' marker is the end of record marker.

    partial writes:
    When a write is interrupted to the index file, it will result in a line
    that does not end in ' :'. If the ' :' is not present at the end of a line,
    or at the end of the file, then the record that is missing it will be
    ignored by the parser.

    When writing new records to the index file, the data is preceded by '\n'
    to ensure that records always start on new lines even if the last write was
    interrupted. As a result it's normal for the last line in the index to be
    missing a trailing newline. One can be added with no harmful effects.

    :ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
        where prefix is e.g. the (fileid,) for .texts instances or () for
        constant-mapped things like .revisions, and the old state is
        tuple(history_vector, cache_dict).  This is used to prevent having an
        ABI change with the C extension that reads .kndx files.
    """
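    # Illustrative sketch, not part of the original class: one index line in
    # the format described above.  'fulltext,no-eol' are FLAGS, 131 is
    # BYTE_OFFSET, 2284 is LENGTH, '.parent-a' names one parent by revision
    # id, '0' names another by sequence number, and ' :' ends the record.
    # All of the values here are invented.
    _EXAMPLE_INDEX_LINE = 'revision-b fulltext,no-eol 131 2284 .parent-a 0 :\n'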
    HEADER = "# bzr knit index 8\n"

    def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
        """Create a _KndxIndex on transport using mapper."""
        self._transport = transport
        self._mapper = mapper
        self._get_scope = get_scope
        self._allow_writes = allow_writes
        self._is_locked = is_locked
        self._reset_cache()
        self.has_graph = True
    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        :param records: a list of tuples:
            (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        paths = {}
        for record in records:
            key = record[0]
            prefix = key[:-1]
            path = self._mapper.map(key) + '.kndx'
            path_keys = paths.setdefault(path, (prefix, []))
            path_keys[1].append(record)
        for path in sorted(paths):
            prefix, path_keys = paths[path]
            self._load_prefixes([prefix])
            lines = []
            orig_history = self._kndx_cache[prefix][1][:]
            orig_cache = self._kndx_cache[prefix][0].copy()

            try:
                for key, options, (_, pos, size), parents in path_keys:
                    if parents is None:
                        # kndx indices cannot be parentless.
                        parents = ()
                    line = "\n%s %s %s %s %s :" % (
                        key[-1], ','.join(options), pos, size,
                        self._dictionary_compress(parents))
                    if type(line) != str:
                        raise AssertionError(
                            'data must be utf8 was %s' % type(line))
                    lines.append(line)
                    self._cache_key(key, options, pos, size, parents)
                if len(orig_history):
                    self._transport.append_bytes(path, ''.join(lines))
                else:
                    self._init_index(path, lines)
            except:
                # If any problems happen, restore the original values and re-raise
                self._kndx_cache[prefix] = (orig_cache, orig_history)
                raise
    def _cache_key(self, key, options, pos, size, parent_keys):
        """Cache a version record in the history array and index cache.

        This is inlined into _load_data for performance. KEEP IN SYNC.
        (It saves 60ms, 25% of the __init__ overhead on local 4000 record
         indexes).
        """
        prefix = key[:-1]
        version_id = key[-1]
        # last-element only for compatibility with the C load_data.
        parents = tuple(parent[-1] for parent in parent_keys)
        for parent in parent_keys:
            if parent[:-1] != prefix:
                raise ValueError("mismatched prefixes for %r, %r" % (
                    key, parent))
        cache, history = self._kndx_cache[prefix]
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in cache:
            index = len(history)
            history.append(version_id)
        else:
            index = cache[version_id][5]
        cache[version_id] = (version_id,
                             options,
                             pos,
                             size,
                             parents,
                             index)
    def check_header(self, fp):
        line = fp.readline()
        if line == '':
            # An empty file can actually be treated as though the file doesn't
            # exist yet.
            raise errors.NoSuchFile(self)
        if line != self.HEADER:
            raise KnitHeaderError(badline=line, filename=self)

    def _check_read(self):
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            self._reset_cache()

    def _check_write_ok(self):
        """Assert if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            self._reset_cache()
        if self._mode != 'w':
            raise errors.ReadOnlyObjectDirtiedError(self)

    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:(index_memo, compression_parent, parents,
            record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        prefixes = self._partition_keys(keys)
        parent_map = self.get_parent_map(keys)
        result = {}
        for key in keys:
            if key not in parent_map:
                continue # Ghost
            method = self.get_method(key)
            parents = parent_map[key]
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parents[0]
            noeol = 'no-eol' in self.get_options(key)
            index_memo = self.get_position(key)
            result[key] = (index_memo, compression_parent,
                           parents, (method, noeol))
        return result
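
    # An illustrative result entry for a delta record (hypothetical values):
    #
    #   ('rev-2',): ((('rev-2',), 863, 104), ('rev-1',), (('rev-1',),),
    #                ('line-delta', False))
    #
    # The index_memo here is the (key, pos, size) tuple from get_position.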

    def get_method(self, key):
        """Return compression method of specified key."""
        options = self.get_options(key)
        if 'fulltext' in options:
            return 'fulltext'
        elif 'line-delta' in options:
            return 'line-delta'
        else:
            raise errors.KnitIndexUnknownMethod(self, options)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        try:
            return self._kndx_cache[prefix][0][suffix][1]
        except KeyError:
            raise RevisionNotPresent(key, self)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        # Parse what we need to up front, this potentially trades off I/O
        # locality (.kndx and .knit in the same block group for the same file
        # id) for less checking in inner loops.
        prefixes = set(key[:-1] for key in keys)
        self._load_prefixes(prefixes)
        result = {}
        for key in keys:
            prefix = key[:-1]
            try:
                suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
            except KeyError:
                pass
            else:
                result[key] = tuple(prefix + (suffix,) for
                    suffix in suffix_parents)
        return result

    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (key, data position, size) to hand to the access
            logic to get the record.
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        entry = self._kndx_cache[prefix][0][suffix]
        return key, entry[2], entry[3]

    def _init_index(self, path, extra_lines=[]):
        """Initialize an index."""
        sio = StringIO()
        sio.write(self.HEADER)
        sio.writelines(extra_lines)
        sio.seek(0)
        self._transport.put_file_non_atomic(path, sio,
                            create_parent_dir=True)
                           # self._create_parent_dir)
                           # mode=self._file_mode,
                           # dir_mode=self._dir_mode)

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        result = set()
        # Identify all key prefixes.
        # XXX: A bit hacky, needs polish.
        if type(self._mapper) == ConstantMapper:
            prefixes = [()]
        else:
            relpaths = set()
            for quoted_relpath in self._transport.iter_files_recursive():
                path, ext = os.path.splitext(quoted_relpath)
                relpaths.add(path)
            prefixes = [self._mapper.unmap(path) for path in relpaths]
        self._load_prefixes(prefixes)
        for prefix in prefixes:
            for suffix in self._kndx_cache[prefix][1]:
                result.add(prefix + (suffix,))
        return result

    def _load_prefixes(self, prefixes):
        """Load the indices for prefixes."""
        self._check_read()
        for prefix in prefixes:
            if prefix not in self._kndx_cache:
                # the load_data interface writes to these variables.
                self._cache = {}
                self._history = []
                self._filename = prefix
                try:
                    path = self._mapper.map(prefix) + '.kndx'
                    fp = self._transport.get(path)
                    try:
                        # _load_data may raise NoSuchFile if the target knit is
                        # completely empty.
                        _load_data(self, fp)
                    finally:
                        fp.close()
                    self._kndx_cache[prefix] = (self._cache, self._history)
                    del self._cache
                    del self._filename
                    del self._history
                except NoSuchFile:
                    self._kndx_cache[prefix] = ({}, [])
                    if type(self._mapper) == ConstantMapper:
                        # preserve behaviour for revisions.kndx etc.
                        self._init_index(path)
                    del self._cache
                    del self._filename
                    del self._history

    def _partition_keys(self, keys):
        """Turn keys into a dict of prefix:suffix_list."""
        result = {}
        for key in keys:
            prefix_keys = result.setdefault(key[:-1], [])
            prefix_keys.append(key[-1])
        return result

    def _dictionary_compress(self, keys):
        """Dictionary compress keys.

        :param keys: The keys to generate references to.
        :return: A string representation of keys. keys which are present are
            dictionary compressed, and others are emitted as fulltext with a
            '.' prefix.
        """
        if not keys:
            return ''
        result_list = []
        prefix = keys[0][:-1]
        cache = self._kndx_cache[prefix][0]
        for key in keys:
            if key[:-1] != prefix:
                # kndx indices cannot refer across partitioned storage.
                raise ValueError("mismatched prefixes for %r" % keys)
            if key[-1] in cache:
                # -- inlined lookup() --
                result_list.append(str(cache[key[-1]][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + key[-1])
        return ' '.join(result_list)
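
    # Illustrative: if the cache maps 'rev-0' to history index 0 and 'rev-2'
    # to history index 2, then
    #
    #   self._dictionary_compress([('rev-0',), ('rev-2',), ('rev-ghost',)])
    #
    # returns '0 2 .rev-ghost' (the ghost is not in the cache, so it is
    # emitted as fulltext with a '.' prefix).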

    def _reset_cache(self):
        # Possibly this should be a LRU cache. A dictionary from key_prefix to
        # (cache_dict, history_vector) for parsed kndx files.
        self._kndx_cache = {}
        self._scope = self._get_scope()
        allow_writes = self._allow_writes()
        if allow_writes:
            self._mode = 'w'
        else:
            self._mode = 'r'

    def _split_key(self, key):
        """Split key into a prefix and suffix."""
        return key[:-1], key[-1]
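
    # Illustrative: self._split_key(('file-id', 'rev-1')) returns
    # (('file-id',), 'rev-1').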


class _KnitGraphIndex(object):
    """A KnitVersionedFiles index layered on GraphIndex."""

    def __init__(self, graph_index, is_locked, deltas=False, parents=True,
        add_callback=None):
        """Construct a KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback that returns True if the index is locked
            and thus usable; checked before the object answers queries.
        :param deltas: Allow delta-compressed records.
        :param parents: If True, record knit parents; if not, do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._deltas = deltas
        self._parents = parents
        if deltas and not parents:
            # XXX: TODO: Delta tree and parent graph should be conceptually
            # separate.
            raise KnitCorrupt(self, "Cannot do delta compression without "
                "parent tracking.")
        self.has_graph = parents
        self._is_locked = is_locked

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
                         (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore - or perhaps we can fix them ourselves :)
        keys = {}
        for (key, options, access_memo, parents) in records:
            if self._parents:
                parents = tuple(parents)
            index, pos, size = access_memo
            if 'no-eol' in options:
                value = 'N'
            else:
                value = ' '
            value += "%d %d" % (pos, size)
            if not self._deltas:
                if 'line-delta' in options:
                    raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
            if self._parents:
                if self._deltas:
                    if 'line-delta' in options:
                        node_refs = (parents, (parents[0],))
                    else:
                        node_refs = (parents, ())
                else:
                    node_refs = (parents, )
            else:
                if parents:
                    raise KnitCorrupt(self, "attempt to add node with parents "
                        "in parentless index.")
                node_refs = ()
            keys[key] = (value, node_refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if (value[0] != keys[key][0][0] or
                    node_refs != keys[key][1]):
                    raise KnitCorrupt(self, "inconsistent details in add_records"
                        ": %s %s" % ((value, node_refs), keys[key]))
                del keys[key]
        result = []
        if self._parents:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value, node_refs))
        else:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value))
        self._add_callback(result)
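
    # An illustrative prepared node for a line-delta record with one parent
    # (hypothetical key values):
    #
    #   (('file-id', 'rev-2'), 'N1023 562',
    #    ((('file-id', 'rev-1'),), (('file-id', 'rev-1'),)))
    #
    # The value packs the no-eol flag ('N' or ' ') with "pos size"; the two
    # reference lists are the parents and the compression parent.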

    def _check_read(self):
        """Raise if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Assert if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        if len(compression_parents) != 1:
            raise AssertionError(
                "Too many compression parents: %r" % compression_parents)
        return compression_parents[0]

    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys, False)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = ()
            else:
                parents = entry[3][0]
            if not self._deltas:
                compression_parent_key = None
            else:
                compression_parent_key = self._compression_parent(entry)
            noeol = (entry[2][0] == 'N')
            if compression_parent_key:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[key] = (self._node_to_position(entry),
                           compression_parent_key, parents,
                           (method, noeol))
        return result

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)

    def get_method(self, key):
        """Return compression method of specified key."""
        return self._get_method(self._get_node(key))

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'

    def _get_node(self, key):
        try:
            return list(self._get_entries([key]))[0]
        except IndexError:
            raise RevisionNotPresent(key, self)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(key)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(key)
        return self._node_to_position(node)

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
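
    # Illustrative: a node whose value is 'N1023 562' yields the memo
    # (node[0], 1023, 562); the leading no-eol flag byte is skipped.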


class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is appended to the .knit file for each key, one record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        # TODO: This can be tuned for writing to sftp and other servers where
        # append() is relatively expensive by grouping the writes to each key
        # prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            try:
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            except errors.NoSuchFile:
                self._transport.mkdir(osutils.dirname(path))
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            offset += size
            result.append((key, base, size))
        return result
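
    # Illustrative: given a 120-byte record for a hypothetical key,
    #
    #   access.add_raw_records([(('file-id', 'rev-1'), 120)], bytes)
    #
    # appends the bytes to file-id.knit and returns
    # [(('file-id', 'rev-1'), <offset at which the write began>, 120)].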

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the access memo for
            retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index request to minimise readv's issued.
        request_lists = []
        current_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if current_prefix == key[:-1]:
                current_list.append((offset, length))
            else:
                if current_prefix is not None:
                    request_lists.append((current_prefix, current_list))
                current_prefix = key[:-1]
                current_list = [(offset, length)]
        # handle the last entry
        if current_prefix is not None:
            request_lists.append((current_prefix, current_list))
        for prefix, read_vector in request_lists:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            transport, path = self._indices[index]
            reader = pack.make_readv_reader(transport, path, offsets)
            for names, read_func in reader.iter_records():
                yield read_func(None)
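
    # Illustrative grouping (hypothetical index objects): memos
    # [(idx_a, 0, 10), (idx_a, 10, 20), (idx_b, 0, 5)] become
    # [(idx_a, [(0, 10), (10, 20)]), (idx_b, [(0, 5)])], so each pack file
    # is read with a single readv call.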

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index
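
    # Illustrative wiring (hypothetical names): after opening a container
    # writer on a new pack file,
    #
    #   access.set_writer(writer, new_index, (transport, 'name.pack'))
    #
    # routes subsequent add_raw_records calls into that pack, and registers
    # where get_raw_records should read new_index's records from.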


# Deprecated, use PatienceSequenceMatcher instead
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher


def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.

    This implementation is for knits with no cached annotations.
    It will work for knits with cached annotations, but this is not
    recommended.
    """
    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate(revision_id))


class _KnitAnnotator(object):
    """Build up the annotations for a text."""

    def __init__(self, knit):
        self._knit = knit

        # Content objects, differs from fulltexts because of how final
        # newlines are treated by knits. the content objects here will always
        # have a final newline
        self._fulltext_contents = {}

        # Annotated lines of specific revisions
        self._annotated_lines = {}

        # Track the raw data for nodes that we could not process yet.
        # This maps the revision_id of the base to a list of children that
        # will be annotated from it.
        self._pending_children = {}

        # Nodes which cannot be extracted
        self._ghosts = set()

        # Track how many children this node has, so we know if we need to keep
        # it
        self._annotate_children = {}
        self._compression_children = {}

        self._all_build_details = {}
        # The children => parent revision_id graph
        self._revision_id_graph = {}

        self._heads_provider = None

        self._nodes_to_keep_annotations = set()
        self._generations_until_keep = 100

    def set_generations_until_keep(self, value):
        """Set the number of generations before caching a node.

        Setting this to -1 will cache every merge node, setting this higher
        will cache fewer nodes.
        """
        self._generations_until_keep = value

    def _add_fulltext_content(self, revision_id, content_obj):
        self._fulltext_contents[revision_id] = content_obj
        # TODO: jam 20080305 It might be good to check the sha1digest here
        return content_obj.text()

    def _check_parents(self, child, nodes_to_annotate):
        """Check if all parents have been processed.

        :param child: A tuple of (rev_id, parents, raw_content)
        :param nodes_to_annotate: If child is ready, add it to
            nodes_to_annotate, otherwise put it back in self._pending_children
        """
        for parent_id in child[1]:
            if (parent_id not in self._annotated_lines):
                # This parent is not yet annotated; defer the child until the
                # parent has been processed.
                self._pending_children.setdefault(parent_id,
                                                  []).append(child)
                break
        else:
            # This one is ready to be processed
            nodes_to_annotate.append(child)

    def _add_annotation(self, revision_id, fulltext, parent_ids,
                        left_matching_blocks=None):
        """Add an annotation entry.

        All parents should already have been annotated.
        :return: A list of children that now have their parents satisfied.
        """
        a = self._annotated_lines
        annotated_parent_lines = [a[p] for p in parent_ids]
        annotated_lines = list(annotate.reannotate(annotated_parent_lines,
            fulltext, revision_id, left_matching_blocks,
            heads_provider=self._get_heads_provider()))
        self._annotated_lines[revision_id] = annotated_lines
        for p in parent_ids:
            ann_children = self._annotate_children[p]
            ann_children.remove(revision_id)
            if (not ann_children
                and p not in self._nodes_to_keep_annotations):
                del self._annotated_lines[p]
                del self._all_build_details[p]
                if p in self._fulltext_contents:
                    del self._fulltext_contents[p]
        # Now that we've added this one, see if there are any pending
        # deltas to be done, certainly this parent is finished
        nodes_to_annotate = []
        for child in self._pending_children.pop(revision_id, []):
            self._check_parents(child, nodes_to_annotate)
        return nodes_to_annotate

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate
        the fulltext.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        if key in self._annotated_lines:
            # Nothing to do
            return []
        pending = set([key])
        records = []
        generation = 0
        kept_generation = 0
        while pending:
            # get all pending nodes
            generation += 1
            this_iteration = pending
            build_details = self._knit._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._knit._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                self._revision_id_graph[key] = parents
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update(p for p in parents
                                 if p not in self._all_build_details)
                if compression_parent:
                    self._compression_children.setdefault(compression_parent,
                        []).append(key)
                if parents:
                    for parent in parents:
                        self._annotate_children.setdefault(parent,
                            []).append(key)
                    num_gens = generation - kept_generation
                    if ((num_gens >= self._generations_until_keep)
                        and len(parents) > 1):
                        kept_generation = generation
                        self._nodes_to_keep_annotations.add(key)

            missing_versions = this_iteration.difference(build_details.keys())
            self._ghosts.update(missing_versions)
            for missing_version in missing_versions:
                # add a key, no parents
                self._revision_id_graph[missing_version] = ()
                pending.discard(missing_version) # don't look for it
        if self._ghosts.intersection(self._compression_children):
            raise KnitCorrupt(
                self._knit,
                "We cannot have nodes which have a ghost compression parent:\n"
                "ghosts: %r\n"
                "compression children: %r"
                % (self._ghosts, self._compression_children))
        # Cleanout anything that depends on a ghost so that we don't wait for
        # the ghost to show up
        for node in self._ghosts:
            if node in self._annotate_children:
                # We won't be building this node
                del self._annotate_children[node]
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records

    def _annotate_records(self, records):
        """Build the annotations for the listed records."""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        for (rev_id, record,
             digest) in self._knit._read_records_iter(records):
            if rev_id in self._annotated_lines:
                continue
            parent_ids = self._revision_id_graph[rev_id]
            parent_ids = [p for p in parent_ids if p not in self._ghosts]
            details = self._all_build_details[rev_id]
            (index_memo, compression_parent, parents,
             record_details) = details
            nodes_to_annotate = []
            # TODO: Remove the punning between compression parents, and
            #       parent_ids, we should be able to do this without assuming
            #       the fulltext is built from the compression parent.
            if len(parent_ids) == 0:
                # There are no parents for this node, so just add it
                # TODO: This probably needs to be decoupled
                fulltext_content, delta = self._knit._factory.parse_record(
                    rev_id, record, record_details, None)
                fulltext = self._add_fulltext_content(rev_id, fulltext_content)
                nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
                    parent_ids, left_matching_blocks=None))
            else:
                child = (rev_id, parent_ids, record)
                # Check if all the parents are present
                self._check_parents(child, nodes_to_annotate)
            while nodes_to_annotate:
                # Should we use a queue here instead of a stack?
                (rev_id, parent_ids, record) = nodes_to_annotate.pop()
                (index_memo, compression_parent, parents,
                 record_details) = self._all_build_details[rev_id]
                if compression_parent is not None:
                    comp_children = self._compression_children[compression_parent]
                    if rev_id not in comp_children:
                        raise AssertionError("%r not in compression children %r"
                            % (rev_id, comp_children))
                    # If there is only 1 child, it is safe to reuse this
                    # content
                    reuse_content = (len(comp_children) == 1
                        and compression_parent not in
                            self._nodes_to_keep_annotations)
                    if reuse_content:
                        # Remove it from the cache since it will be changing
                        parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
                        # Make sure to copy the fulltext since it might be
                        # modified
                        parent_fulltext = list(parent_fulltext_content.text())
                    else:
                        parent_fulltext_content = self._fulltext_contents[compression_parent]
                        parent_fulltext = parent_fulltext_content.text()
                    comp_children.remove(rev_id)
                    fulltext_content, delta = self._knit._factory.parse_record(
                        rev_id, record, record_details,
                        parent_fulltext_content,
                        copy_base_content=(not reuse_content))
                    fulltext = self._add_fulltext_content(rev_id,
                                                          fulltext_content)
                    blocks = KnitContent.get_line_delta_blocks(delta,
                            parent_fulltext, fulltext)
                else:
                    fulltext_content = self._knit._factory.parse_fulltext(
                        record, rev_id)
                    fulltext = self._add_fulltext_content(rev_id,
                        fulltext_content)
                    blocks = None
                nodes_to_annotate.extend(
                    self._add_annotation(rev_id, fulltext, parent_ids,
                                     left_matching_blocks=blocks))

    def _get_heads_provider(self):
        """Create a heads provider for resolving ancestry issues."""
        if self._heads_provider is not None:
            return self._heads_provider
        parent_provider = _mod_graph.DictParentsProvider(
            self._revision_id_graph)
        graph_obj = _mod_graph.Graph(parent_provider)
        head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
        self._heads_provider = head_cache
        return head_cache

    def annotate(self, key):
        """Return the annotated fulltext at the given key.

        :param key: The key to annotate.
        """
        if True or len(self._knit._fallback_vfs) > 0:
            # stacked knits can't use the fast path at present.
            return self._simple_annotate(key)
        records = self._get_build_graph(key)
        if key in self._ghosts:
            raise errors.RevisionNotPresent(key, self._knit)
        self._annotate_records(records)
        return self._annotated_lines[key]

    def _simple_annotate(self, key):
        """Return annotated fulltext, rediffing from the full texts.

        This is slow but makes no assumptions about the repository
        being able to produce line deltas.
        """
        # TODO: this code generates a parent maps of present ancestors; it
        # could be split out into a separate method, and probably should use
        # iter_ancestry instead. -- mbp and robertc 20080704
        graph = Graph(self._knit)
        head_cache = _mod_graph.FrozenHeadsCache(graph)
        search = graph._make_breadth_first_searcher([key])
        keys = set()
        while True:
            try:
                present, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(present)
        parent_map = self._knit.get_parent_map(keys)
        parent_cache = {}
        reannotate = annotate.reannotate
        for record in self._knit.get_record_stream(keys, 'topological', True):
            key = record.key
            fulltext = split_lines(record.get_bytes_as('fulltext'))
            parents = parent_map[key]
            if parents is not None:
                parent_lines = [parent_cache[parent] for parent in parent_map[key]]
            else:
                parent_lines = []
            parent_cache[key] = list(
                reannotate(parent_lines, fulltext, key, None, head_cache))
        try:
            return parent_cache[key]
        except KeyError, e:
            raise errors.RevisionNotPresent(key, self._knit)


try:
    from bzrlib._knit_load_data_c import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data