# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
"""Knit versionedfile implementation.

A knit is a versioned file implementation that supports efficient append only
writes.

lifeless: the data file is made up of "delta records".  each delta record has
a delta header that contains: (1) a version id, (2) the size of the delta (in
lines), and (3) the digest of the -expanded data- (i.e. the delta applied to
the parent).  the delta also ends with an end-marker; simply "end VERSION"

delta can be line or full contents.
... the 8's there are the index number of the annotation.
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
59,59,3
8
8         if ie.executable:
8             e.set('executable', 'yes')
130,130,2
8         if elt.get('executable') == 'yes':
8             ie.executable = True
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad

09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
09:33 < lifeless> right
09:33 < jrydberg> lifeless: the position and size is the range in the data file

so the index sequence is the dictionary compressed sequence number used
in the deltas to provide line annotation

"""
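
# For orientation: throughout this module a "delta" is a list of hunks of the
# form (start, end, count, lines).  An illustrative sketch (made-up values,
# not from a real knit): applying
#
#   delta = [(1, 2, 2, ['middle v2\n', 'extra\n'])]
#
# to the parent lines ['top\n', 'middle\n', 'tail\n'] replaces line range
# [1, 2) and yields ['top\n', 'middle v2\n', 'extra\n', 'tail\n'].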

# TODOS:
# 10:16 < lifeless> make partial index writes safe
# 10:16 < lifeless> implement 'knit.check()' like weave.check()
# 10:17 < lifeless> record known ghosts so we can detect when they are filled
#                   in rather than the current 'reweave always' approach.
# move sha1 out of the content so that join is faster at verifying parents
# record content length ?

from cStringIO import StringIO
from itertools import izip, chain
import operator

from zlib import Z_DEFAULT_COMPRESSION

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    debug,
    diff,
    index as _mod_index,
    pack,
    progress,
    trace,
    )
""")
from bzrlib import (
    errors,
    patiencediff,
    )
from bzrlib.errors import (
    InvalidRevisionId,
    KnitCorrupt,
    KnitHeaderError,
    RevisionAlreadyPresent,
    RevisionNotPresent,
    )
from bzrlib.graph import Graph
from bzrlib.osutils import (
    contains_whitespace,
    sha_string,
    sha_strings,
    split_lines,
    )
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile, bytes_to_gzip
from bzrlib.versionedfile import (
    AbsentContentFactory,
    adapter_registry,
    ConstantMapper,
    ContentFactory,
    FulltextContentFactory,
    VersionedFiles,
    )


# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located.  we may need to
# bypass python file buffering.

DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'


class KnitAdapter(object):
    """Base class for knit record adaption."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = KnitVersionedFiles(None, None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf


class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, bytes = self._data._record_to_data((rec[1],), rec[3],
            content.text())
        return bytes


class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
        return bytes


class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to full texts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter from annotated deltas to full texts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())


class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to full texts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaPlainToFullText(KnitAdapter):
    """An adapter from plain deltas to full texts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())
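

# Adapters like the ones above are looked up through bzrlib's
# adapter_registry, keyed by (source storage_kind, target storage_kind);
# insert_record_stream() below does exactly this.  An illustrative lookup
# (hypothetical 'record' and 'basis_vf' objects, not part of this module):
#
#   adapter_factory = adapter_registry.get(
#       ('knit-annotated-ft-gz', 'fulltext'))
#   adapter = adapter_factory(basis_vf)
#   text = adapter.get_bytes(record,
#       record.get_bytes_as(record.storage_kind))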


class KnitContentFactory(ContentFactory):
    """Content factory for streaming from knits.

    :seealso ContentFactory:
    """

    def __init__(self, key, parents, build_details, sha1, raw_record,
        annotated, knit=None):
        """Create a KnitContentFactory for key.

        :param key: The key.
        :param parents: The parents.
        :param build_details: The build details as returned from
            get_build_details.
        :param sha1: The sha1 expected from the full text of this object.
        :param raw_record: The bytes of the knit data from disk.
        :param annotated: True if the raw data is annotated.
        """
        ContentFactory.__init__(self)
        self.sha1 = sha1
        self.key = key
        self.parents = parents
        if build_details[0] == 'line-delta':
            kind = 'delta'
        else:
            kind = 'ft'
        if annotated:
            annotated_kind = 'annotated-'
        else:
            annotated_kind = ''
        self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
        self._raw_record = raw_record
        self._build_details = build_details
        self._knit = knit

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            return self._raw_record
        if storage_kind == 'fulltext' and self._knit is not None:
            return self._knit.get_text(self.key[0])
        raise errors.UnavailableRepresentation(self.key, storage_kind,
            self.storage_kind)


class KnitContent(object):
    """Content of a knit version to which deltas can be applied.

    This is always stored in memory as a list of lines with \n at the end,
    plus a flag saying if the final ending is really there or not, because that
    corresponds to the on-disk knit representation.
    """

    def __init__(self):
        self._should_strip_eol = False

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        raise NotImplementedError(self.apply_delta)

    def line_delta_iter(self, new_lines):
        """Generate line-based delta from this content to new_lines."""
        new_texts = new_lines.text()
        old_texts = self.text()
        s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
        for tag, i1, i2, j1, j2 in s.get_opcodes():
            if tag == 'equal':
                continue
            # ofrom, oto, length, data
            yield i1, i2, j2 - j1, new_lines._lines[j1:j2]

    def line_delta(self, new_lines):
        return list(self.line_delta_iter(new_lines))
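
    # Illustrative sketch of line_delta output (made-up content, assuming the
    # AnnotatedKnitContent subclass defined later in this module):
    #
    #   old = AnnotatedKnitContent([('rev-1', 'a\n'), ('rev-1', 'b\n')])
    #   new = AnnotatedKnitContent([('rev-1', 'a\n'), ('rev-2', 'c\n')])
    #   old.line_delta(new) == [(1, 2, 1, [('rev-2', 'c\n')])]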

    @staticmethod
    def get_line_delta_blocks(knit_delta, source, target):
        """Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
        target_len = len(target)
        s_pos = 0
        t_pos = 0
        for s_begin, s_end, t_len, new_text in knit_delta:
            true_n = s_begin - s_pos
            n = true_n
            if n > 0:
                # knit deltas do not provide reliable info about whether the
                # last line of a file matches, due to eol handling.
                if source[s_pos + n - 1] != target[t_pos + n - 1]:
                    n -= 1
                if n > 0:
                    yield s_pos, t_pos, n
            t_pos += t_len + true_n
            s_pos = s_end
        n = target_len - t_pos
        if n > 0:
            if source[s_pos + n - 1] != target[t_pos + n - 1]:
                n -= 1
            if n > 0:
                yield s_pos, t_pos, n
        yield s_pos + (target_len - t_pos), target_len, 0


class AnnotatedKnitContent(KnitContent):
    """Annotated content."""

    def __init__(self, lines):
        KnitContent.__init__(self)
        self._lines = lines

    def annotate(self):
        """Return a list of (origin, text) for each content line."""
        lines = self._lines[:]
        if self._should_strip_eol:
            origin, last_line = lines[-1]
            lines[-1] = (origin, last_line.rstrip('\n'))
        return lines

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        offset = 0
        lines = self._lines
        for start, end, count, delta_lines in delta:
            lines[offset+start:offset+end] = delta_lines
            offset = offset + (start - end) + count

    def text(self):
        try:
            lines = [text for origin, text in self._lines]
        except ValueError, e:
            # most commonly (only?) caused by the internal form of the knit
            # missing annotation information because of a bug - see thread
            # around 20071015
            raise KnitCorrupt(self,
                "line in annotated knit missing annotation information: %s"
                % (e,))
        if self._should_strip_eol:
            lines[-1] = lines[-1].rstrip('\n')
        return lines

    def copy(self):
        return AnnotatedKnitContent(self._lines[:])


class PlainKnitContent(KnitContent):
    """Unannotated content.

    When annotate[_iter] is called on this content, the same version is
    reported for all lines.  Generally, annotate[_iter] is not useful on
    PlainKnitContent objects.
    """

    def __init__(self, lines, version_id):
        KnitContent.__init__(self)
        self._lines = lines
        self._version_id = version_id

    def annotate(self):
        """Return a list of (origin, text) for each content line."""
        return [(self._version_id, line) for line in self._lines]

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        offset = 0
        lines = self._lines
        for start, end, count, delta_lines in delta:
            lines[offset+start:offset+end] = delta_lines
            offset = offset + (start - end) + count
        self._version_id = new_version_id

    def copy(self):
        return PlainKnitContent(self._lines[:], self._version_id)

    def text(self):
        lines = self._lines
        if self._should_strip_eol:
            lines = lines[:]
            lines[-1] = lines[-1].rstrip('\n')
        return lines


class _KnitFactory(object):
    """Base class for common Factory functions."""

    def parse_record(self, version_id, record, record_details,
                     base_content, copy_base_content=True):
        """Parse a record into a full content object.

        :param version_id: The official version id for this content
        :param record: The data returned by read_records_iter()
        :param record_details: Details about the record returned by
            get_build_details
        :param base_content: If get_build_details returns a compression_parent,
            you must return a base_content here, else use None
        :param copy_base_content: When building from the base_content, decide
            you can either copy it and return a new object, or modify it in
            place.
        :return: (content, delta) A Content object and possibly a line-delta,
            delta may be None
        """
        method, noeol = record_details
        if method == 'line-delta':
            if copy_base_content:
                content = base_content.copy()
            else:
                content = base_content
            delta = self.parse_line_delta(record, version_id)
            content.apply_delta(delta, version_id)
        else:
            content = self.parse_fulltext(record, version_id)
            delta = None
        content._should_strip_eol = noeol
        return (content, delta)


class KnitAnnotateFactory(_KnitFactory):
    """Factory for creating annotated Content objects."""

    annotated = True

    def make(self, lines, version_id):
        num_lines = len(lines)
        return AnnotatedKnitContent(zip([version_id] * num_lines, lines))

    def parse_fulltext(self, content, version_id):
        """Convert fulltext to internal representation

        fulltext content is of the format
        revid(utf8) plaintext\n
        internal representation is of the format:
        (revid, plaintext)
        """
        # TODO: jam 20070209 The tests expect this to be returned as tuples,
        #       but the code itself doesn't really depend on that.
        #       Figure out a way to not require the overhead of turning the
        #       list back into tuples.
        lines = [tuple(line.split(' ', 1)) for line in content]
        return AnnotatedKnitContent(lines)
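
    # Illustrative round trip (made-up values): the serialised annotated
    # fulltext
    #
    #   rev-1 hello\n
    #   rev-2 world\n
    #
    # parses to [('rev-1', 'hello\n'), ('rev-2', 'world\n')], and
    # lower_fulltext() below re-joins each (origin, text) pair with a space.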

    def parse_line_delta_iter(self, lines):
        return iter(self.parse_line_delta(lines))

    def parse_line_delta(self, lines, version_id, plain=False):
        """Convert a line based delta into internal representation.

        line delta is in the form of:
        intstart intend intcount
        1..count lines:
        revid(utf8) newline\n
        internal representation is
        (start, end, count, [1..count tuples (revid, newline)])

        :param plain: If True, the lines are returned as a plain
            list without annotations, not as a list of (origin, content)
            tuples, i.e. (start, end, count, [1..count newline])
        """
        result = []
        lines = iter(lines)
        next = lines.next

        cache = {}
        def cache_and_return(line):
            origin, text = line.split(' ', 1)
            return cache.setdefault(origin, origin), text

        # walk through the lines parsing.
        # Note that the plain test is explicitly pulled out of the
        # loop to minimise any performance impact
        if plain:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [next().split(' ', 1)[1] for i in xrange(count)]
                result.append((start, end, count, contents))
        else:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
                result.append((start, end, count, contents))
        return result

    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return (line.split(' ', 1)[1] for line in lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        lines = iter(lines)
        next = lines.next
        for header in lines:
            header = header.split(',')
            count = int(header[2])
            for i in xrange(count):
                origin, text = next().split(' ', 1)
                yield text

    def lower_fulltext(self, content):
        """convert a fulltext content record into a serializable form.

        see parse_fulltext which this inverts.
        """
        # TODO: jam 20070209 We only do the caching thing to make sure that
        #       the origin is a valid utf-8 line, eventually we could remove it
        return ['%s %s' % (o, t) for o, t in content._lines]

    def lower_line_delta(self, delta):
        """convert a delta into a serializable form.

        See parse_line_delta which this inverts.
        """
        # TODO: jam 20070209 We only do the caching thing to make sure that
        #       the origin is a valid utf-8 line, eventually we could remove it
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(origin + ' ' + text
                       for origin, text in lines)
        return out

    def annotate(self, knit, key):
        content = knit._get_content(key)
        # adjust for the fact that serialised annotations are only key
        # suffixes for this factory.
        if type(key) == tuple:
            prefix = key[:-1]
            origins = content.annotate()
            result = []
            for origin, line in origins:
                result.append((prefix + (origin,), line))
            return result
        else:
            # XXX: This smells a bit.  Why would key ever be a non-tuple here?
            # Aren't keys defined to be tuples?  -- spiv 20080618
            return content.annotate()


class KnitPlainFactory(_KnitFactory):
    """Factory for creating plain Content objects."""

    annotated = False

    def make(self, lines, version_id):
        return PlainKnitContent(lines, version_id)

    def parse_fulltext(self, content, version_id):
        """This parses an unannotated fulltext.

        Note that this is not a noop - the internal representation
        has (versionid, line) - it's just a constant versionid.
        """
        return self.make(content, version_id)

    def parse_line_delta_iter(self, lines, version_id):
        cur = 0
        num_lines = len(lines)
        while cur < num_lines:
            header = lines[cur]
            cur += 1
            start, end, c = [int(n) for n in header.split(',')]
            yield start, end, c, lines[cur:cur+c]
            cur += c

    def parse_line_delta(self, lines, version_id):
        return list(self.parse_line_delta_iter(lines, version_id))
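
    # Sketch of the serialised plain delta this parses (made-up values):
    #
    #   0,1,2
    #   new first line\n
    #   second new line\n
    #
    # reads as "replace lines [0, 1) with the following 2 lines", and parses
    # to (0, 1, 2, ['new first line\n', 'second new line\n']).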

    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return iter(lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        lines = iter(lines)
        next = lines.next
        for header in lines:
            header = header.split(',')
            count = int(header[2])
            for i in xrange(count):
                yield next()

    def lower_fulltext(self, content):
        return content.text()

    def lower_line_delta(self, delta):
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(lines)
        return out

    def annotate(self, knit, key):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate(key)


def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def factory(transport):
        index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
        access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(index, access, annotated=annotated)
    return factory
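
# Minimal usage sketch (assumes PrefixMapper from bzrlib.versionedfile and a
# hypothetical transport object 'a_transport'):
#
#   factory = make_file_factory(annotated=True, mapper=PrefixMapper())
#   vf = factory(a_transport)
#   vf.add_lines(('file-id', 'rev-1'), (), ['hello\n'])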


def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests, it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph or delta
        ref_length = 0
        if graph:
            ref_length += 1
        if delta:
            ref_length += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(index, access,
            max_delta_chain=max_delta_chain)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_knit(versioned_files):
    versioned_files.stream.close()
    versioned_files.writer.end()
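
# Usage sketch for the pack factory and its cleanup helper (hypothetical
# 'a_transport'; graph and delta compression enabled, 2-element keys):
#
#   factory = make_pack_factory(graph=True, delta=True, keylength=2)
#   vf = factory(a_transport)
#   try:
#       vf.add_lines(('file-id', 'rev-1'), (), ['hello\n'])
#   finally:
#       cleanup_pack_knit(vf)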


class KnitVersionedFiles(VersionedFiles):
    """Storage for many versioned files using knit compression.

    Backend storage is managed by indices and data objects.
    """

    def __init__(self, index, data_access, max_delta_chain=200,
        annotated=False):
        """Create a KnitVersionedFiles with index and data_access.

        :param index: The index for the knit data.
        :param data_access: The access object to store and retrieve knit
            records.
        :param max_delta_chain: The maximum number of deltas to permit during
            insertion. Set to 0 to prohibit the use of deltas.
        :param annotated: Set to True to cause annotations to be calculated and
            stored during insertion.
        """
        self._index = index
        self._access = data_access
        self._max_delta_chain = max_delta_chain
        if annotated:
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        self._fallback_vfs = []

    def add_fallback_versioned_files(self, a_versioned_files):
        """Add a source of texts for texts not present in this knit.

        :param a_versioned_files: A VersionedFiles object.
        """
        self._fallback_vfs.append(a_versioned_files)

    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """See VersionedFiles.add_lines()."""
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id)

    def _add(self, key, lines, parents, parent_texts,
        left_matching_blocks, nostore_sha, random_id):
        """Add a set of lines on top of version specified by parents.

        Any versions not present will be converted into ghosts.
        """
        # first thing, if the content is something we don't need to store, find
        # that out.
        line_bytes = ''.join(lines)
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent

        present_parents = []
        if parent_texts is None:
            parent_texts = {}
        # Do a single query to ascertain parent presence.
        present_parent_map = self.get_parent_map(parents)
        for parent in parents:
            if parent in present_parent_map:
                present_parents.append(parent)

        # Currently we can only compress against the left most present parent.
        if (len(present_parents) == 0 or
            present_parents[0] != parents[0]):
            delta = False
        else:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas.  This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents[0])

        text_length = len(line_bytes)
        options = []
        if lines:
            if lines[-1][-1] != '\n':
                # copy the contents of lines.
                lines = lines[:]
                options.append('no-eol')
                lines[-1] = lines[-1] + '\n'
                line_bytes += '\n'

        for element in key:
            if type(element) != str:
                raise TypeError("key contains non-strings: %r" % (key,))
        # Knit hunks are still last-element only
        version_id = key[-1]
        content = self._factory.make(lines, version_id)
        if 'no-eol' in options:
            # Hint to the content object that its text() call should strip the
            # EOL.
            content._should_strip_eol = True
        if delta or (self._factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self._factory.annotated,
                left_matching_blocks)

        if delta:
            options.append('line-delta')
            store_lines = self._factory.lower_line_delta(delta_hunks)
            size, bytes = self._record_to_data(key, digest,
                store_lines)
        else:
            options.append('fulltext')
            # isinstance is slower and we have no hierarchy.
            if self._factory.__class__ == KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                size, bytes = self._record_to_data(key, digest,
                    lines, [line_bytes])
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self._factory.lower_fulltext(content)
                size, bytes = self._record_to_data(key, digest,
                    store_lines)

        access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
        self._index.add_records(
            ((key, options, access_memo, parents),),
            random_id=random_id)
        return digest, text_length, content
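
    # Usage sketch (hypothetical 'vf' and keys; the parent should already be
    # present for delta compression to be considered):
    #
    #   digest, text_length, content = vf.add_lines(
    #       ('file-id', 'rev-2'), [('file-id', 'rev-1')], ['a\n', 'b\n'])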

    def annotate(self, key):
        """See VersionedFiles.annotate."""
        return self._factory.annotate(self, key)

    def check(self, progress_bar=None):
        """See VersionedFiles.check()."""
        # This doesn't actually test extraction of everything, but that will
        # impact 'bzr check' substantially, and needs to be integrated with
        # care. However, it does check for the obvious problem of a delta with
        # no basis.
        keys = self.keys()
        parent_map = self.get_parent_map(keys)
        for key in keys:
            if self._index.get_method(key) != 'fulltext':
                compression_parent = parent_map[key][0]
                if compression_parent not in parent_map:
                    raise errors.KnitCorrupt(self,
                        "Missing basis parent %s for %s" % (
                        compression_parent, key))

    def _check_add(self, key, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        version_id = key[-1]
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self.filename)
        self.check_not_reserved_id(version_id)
        # TODO: If random_id==False and the key is already present, we should
        # probably check that the existing content is identical to what is
        # being inserted, and otherwise raise an exception.  This would make
        # the bundle code simpler.
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def _check_header(self, key, line):
        rec = self._split_header(line)
        self._check_header_version(rec, key[-1])
        return rec

    def _check_header_version(self, rec, version_id):
        """Checks the header version on original format knit records.

        These have the last component of the key embedded in the record.
        """
        if rec[1] != version_id:
            raise KnitCorrupt(self,
                'unexpected version, wanted %r, got %r' % (version_id, rec[1]))

    def _check_should_delta(self, parent):
        """Iterate back through the parent listing, looking for a fulltext.

        This is used when we want to decide whether to add a delta or a new
        fulltext. It searches for _max_delta_chain parents. When it finds a
        fulltext parent, it sees if the total size of the deltas leading up to
        it is large enough to indicate that we want a new full text anyway.

        Return True if we should create a new delta, False if we should use a
        full text.
        """
        delta_size = 0
        fulltext_size = None
        for count in xrange(self._max_delta_chain):
            # XXX: Collapse these two queries:
            try:
                method = self._index.get_method(parent)
            except RevisionNotPresent:
                # Some basis is not locally present: always delta
                return False
            index, pos, size = self._index.get_position(parent)
            if method == 'fulltext':
                fulltext_size = size
                break
            delta_size += size
            # We don't explicitly check for presence because this is in an
            # inner loop, and if it's missing it'll fail anyhow.
            # TODO: This should be asking for compression parent, not graph
            # parent.
            parent = self._index.get_parent_map([parent])[parent][0]
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size
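
    # Worked sketch of the heuristic (assumed sizes): for a chain
    # rev-3 -> rev-2 -> rev-1 where rev-1 is a 1000 byte fulltext and each
    # delta is 300 bytes, delta_size is 600 by the time the fulltext is
    # found, so 1000 > 600 returns True and another delta is stored.  Once
    # the accumulated deltas exceed the fulltext size, the comparison fails
    # and a fresh fulltext is written instead.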

    def _build_details_to_components(self, build_details):
        """Convert a build_details tuple to a position tuple."""
        # record_details, access_memo, compression_parent
        return build_details[3], build_details[0], build_details[1]

    def _get_components_positions(self, keys, allow_missing=False):
        """Produce a map of position data for the components of keys.

        This data is intended to be used for retrieving the knit records.

        A dict of key to (record_details, index_memo, next, parents) is
        returned.
        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
            data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version

        :param allow_missing: If True do not raise an error on a missing
            component, just ignore it.
        """
        component_data = {}
        pending_components = keys
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[key] = self._build_details_to_components(details)
            missing = current_components.difference(build_details)
            if missing and not allow_missing:
                raise errors.RevisionNotPresent(missing.pop(), self)
        return component_data

    def _get_content(self, key, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(key, None)
        if cached_version is not None:
            # Ensure the cache dict is valid.
            if not self.get_parent_map([key]):
                raise RevisionNotPresent(key, self)
            return cached_version
        text_map, contents_map = self._get_content_maps([key])
        return contents_map[key]

    def _get_content_maps(self, keys, nonlocal_keys=None):
        """Produce maps of text and KnitContents

        :param keys: The keys to produce content maps for.
        :param nonlocal_keys: An iterable of keys(possibly intersecting keys)
            which are known to not be in this knit, but rather in one of the
            fallback knits.
        :return: (text_map, content_map) where text_map contains the texts for
            the requested versions and content_map contains the KnitContents.
        """
        # FUTURE: This function could be improved for the 'extract many' case
        # by tracking each component and only doing the copy when the number of
        # children that need to apply delta's to it is > 1 or it is part of the
        # final output.
        keys = list(keys)
        multiple_versions = len(keys) != 1
        record_map = self._get_record_map(keys, allow_missing=True)

        text_map = {}
        content_map = {}
        final_content = {}
        if nonlocal_keys is None:
            nonlocal_keys = set()
        else:
            nonlocal_keys = frozenset(nonlocal_keys)
        missing_keys = set(nonlocal_keys)
        for source in self._fallback_vfs:
            if not missing_keys:
                break
            for record in source.get_record_stream(missing_keys,
                'unordered', True):
                if record.storage_kind == 'absent':
                    continue
                missing_keys.remove(record.key)
                bytes = record.get_bytes_as('fulltext')
                lines = split_lines(bytes)
                text_map[record.key] = lines
                content_map[record.key] = PlainKnitContent(lines, record.key)
                if record.key in keys:
                    final_content[record.key] = content_map[record.key]
        for key in keys:
            if key in nonlocal_keys:
                # already handled
                continue
            components = []
            cursor = key
            while cursor is not None:
                try:
                    record, record_details, digest, next = record_map[cursor]
                except KeyError:
                    raise RevisionNotPresent(cursor, self)
                components.append((cursor, record, record_details, digest))
                cursor = next
                if cursor in content_map:
                    # no need to plan further back
                    components.append((cursor, None, None, None))
                    break

            content = None
            for (component_id, record, record_details,
                 digest) in reversed(components):
                if component_id in content_map:
                    content = content_map[component_id]
                else:
                    content, delta = self._factory.parse_record(key[-1],
                        record, record_details, content,
                        copy_base_content=multiple_versions)
                    if multiple_versions:
                        content_map[component_id] = content

            final_content[key] = content

            # digest here is the digest from the last applied component.
            text = content.text()
            actual_sha = sha_strings(text)
            if actual_sha != digest:
                raise KnitCorrupt(self,
                    '\n  sha-1 %s'
                    '\n  of reconstructed text does not match'
                    '\n  expected %s'
                    '\n  for version %s' %
                    (actual_sha, digest, key))
            text_map[key] = text
        return text_map, final_content

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        return self._get_parent_map(keys)[0]

    def _get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A tuple. The first element is a mapping from keys to parents.
            Absent keys are absent from the mapping. The second element is a
            list with the locations each key was found in. The first element
            is the in-this-knit parents, the second the first fallback source,
            and so on.
        """
        result = {}
        sources = [self._index] + self._fallback_vfs
        source_results = []
        missing = set(keys)
        for source in sources:
            if not missing:
                break
            new_result = source.get_parent_map(missing)
            source_results.append(new_result)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result, source_results

    def _get_record_map(self, keys, allow_missing=False):
        """Produce a dictionary of knit records.

        :return: {key:(record, record_details, digest, next)}
            record
                data returned from read_records
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.
        :param keys: The keys to build a map for
        :param allow_missing: If some records are missing, rather than
            error, just return the data that could be generated.
        """
        position_map = self._get_components_positions(keys,
            allow_missing=allow_missing)
        # key = component_id, r = record_details, i_m = index_memo, n = next
        records = [(key, i_m) for key, (r, i_m, n)
                             in position_map.iteritems()]
        record_map = {}
        for key, record, digest in \
                self._read_records_iter(records):
            (record_details, index_memo, next) = position_map[key]
            record_map[key] = record, record_details, digest, next
        return record_map

    def get_record_stream(self, keys, ordering, include_delta_closure):
        """Get a stream of records for keys.

        :param keys: The keys to include.
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        # keys might be a generator
        keys = set(keys)
        if not keys:
            return
        if not self._index.has_graph:
            # Cannot topological order when no graph has been stored.
            ordering = 'unordered'
        if include_delta_closure:
            positions = self._get_components_positions(keys, allow_missing=True)
        else:
            build_details = self._index.get_build_details(keys)
            # map from key to
            # (record_details, access_memo, compression_parent_key)
            positions = dict((key, self._build_details_to_components(details))
                for key, details in build_details.iteritems())
        absent_keys = keys.difference(set(positions))
        # There may be more absent keys : if we're missing the basis component
        # and are trying to include the delta closure.
        if include_delta_closure:
            needed_from_fallback = set()
            # Build up reconstructable_keys dict.  key:True in this dict means
            # the key can be reconstructed.
            reconstructable_keys = {}
            for key in keys:
                # the delta chain
                try:
                    chain = [key, positions[key][2]]
                except KeyError:
                    needed_from_fallback.add(key)
                    continue
                result = True
                while chain[-1] is not None:
                    if chain[-1] in reconstructable_keys:
                        result = reconstructable_keys[chain[-1]]
                        break
                    else:
                        try:
                            chain.append(positions[chain[-1]][2])
                        except KeyError:
                            # missing basis component
                            needed_from_fallback.add(chain[-1])
                            result = True
                            break
                for chain_key in chain[:-1]:
                    reconstructable_keys[chain_key] = result
                if not result:
                    needed_from_fallback.add(key)
        # Double index lookups here : need a unified api ?
        global_map, parent_maps = self._get_parent_map(keys)
        if ordering == 'topological':
            # Global topological sort
            present_keys = topo_sort(global_map)
            # Now group by source:
            source_keys = []
            current_source = None
            for key in present_keys:
                for parent_map in parent_maps:
                    if key in parent_map:
                        key_source = parent_map
                        break
                if current_source is not key_source:
                    source_keys.append((key_source, []))
                    current_source = key_source
                source_keys[-1][1].append(key)
        else:
            # Just group by source; remote sources first.
            present_keys = []
            source_keys = []
            for parent_map in reversed(parent_maps):
                source_keys.append((parent_map, []))
                for key in parent_map:
                    present_keys.append(key)
                    source_keys[-1][1].append(key)
        absent_keys = keys - set(global_map)
        for key in absent_keys:
            yield AbsentContentFactory(key)
        # restrict our view to the keys we can answer.
        our_keys = parent_maps[0]
        # keys - needed_from_fallback
        # keys = keys - absent_keys
        # XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
        # XXX: At that point we need to consider the impact of double reads by
        # utilising components multiple times.
        if include_delta_closure:
            # XXX: get_content_maps performs its own index queries; allow state
            # to be passed in.
            text_map, _ = self._get_content_maps(present_keys,
                needed_from_fallback - absent_keys)
            for key in present_keys:
                yield FulltextContentFactory(key, global_map[key], None,
                    ''.join(text_map[key]))
        else:
            for source, keys in source_keys:
                if source is parent_maps[0]:
                    # this KnitVersionedFiles
                    records = [(key, positions[key][1]) for key in keys]
                    for key, raw_data, sha1 in self._read_records_iter_raw(records):
                        (record_details, index_memo, _) = positions[key]
                        yield KnitContentFactory(key, global_map[key],
                            record_details, sha1, raw_data,
                            self._factory.annotated, None)
                else:
                    vf = self._fallback_vfs[parent_maps.index(source) - 1]
                    for record in vf.get_record_stream(keys, ordering,
                        include_delta_closure):
                        yield record
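
    # Usage sketch (hypothetical 'vf' and key): with include_delta_closure
    # True, every non-absent factory can reproduce a fulltext.
    #
    #   for factory in vf.get_record_stream(
    #           [('file-id', 'rev-1')], 'topological', True):
    #       if factory.storage_kind != 'absent':
    #           text = factory.get_bytes_as('fulltext')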

    def get_sha1s(self, keys):
        """See VersionedFiles.get_sha1s()."""
        missing = set(keys)
        record_map = self._get_record_map(missing, allow_missing=True)
        result = {}
        for key, details in record_map.iteritems():
            if key not in missing:
                continue
            # record entry 2 is the 'digest'.
            result[key] = details[2]
        missing.difference_update(set(result))
        for source in self._fallback_vfs:
            if not missing:
                break
            new_result = source.get_sha1s(missing)
            result.update(new_result)
            missing.difference_update(set(new_result))
        return result

    def insert_record_stream(self, stream):
        """Insert a record stream into this container.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFiles.get_record_stream:
        """
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        if self._factory.annotated:
            # self is annotated, we need annotated knits to use directly.
            annotated = "annotated-"
            convertibles = []
        else:
            # self is not annotated, but we can strip annotations cheaply.
            annotated = ""
            convertibles = set(["knit-annotated-ft-gz"])
            if self._max_delta_chain:
                convertibles.add("knit-annotated-delta-gz")
        # The set of types we can cheaply adapt without needing basis texts.
        native_types = set()
        if self._max_delta_chain:
            native_types.add("knit-%sdelta-gz" % annotated)
        native_types.add("knit-%sft-gz" % annotated)
        knit_types = native_types.union(convertibles)
        adapters = {}
        # Buffer all index entries that we can't add immediately because their
        # basis parent is missing. We don't buffer all because generating
        # annotations may require access to some of the new records. However we
        # can't generate annotations from new deltas until their basis parent
        # is present anyway, so we get away with not needing an index that
        # includes the new keys.
        # key = basis_parent, value = index entry to add
        buffered_index_entries = {}
        for record in stream:
            parents = record.parents
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key], self)
            if record.storage_kind in knit_types:
                if record.storage_kind not in native_types:
                    try:
                        adapter_key = (record.storage_kind, "knit-delta-gz")
                        adapter = get_adapter(adapter_key)
                    except KeyError:
                        adapter_key = (record.storage_kind, "knit-ft-gz")
                        adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(
                        record, record.get_bytes_as(record.storage_kind))
                else:
                    bytes = record.get_bytes_as(record.storage_kind)
                options = [record._build_details[0]]
                if record._build_details[1]:
                    options.append('no-eol')
                # Just blat it across.
                # Note: This does end up adding data on duplicate keys. As
                # modern repositories use atomic insertions this should not
                # lead to excessive growth in the event of interrupted fetches.
                # 'knit' repositories may suffer excessive growth, but as a
                # deprecated format this is tolerable. It can be fixed if
                # needed by the kndx index support raising on a duplicate
                # add with identical parents and options.
                access_memo = self._access.add_raw_records(
                    [(record.key, len(bytes))], bytes)[0]
                index_entry = (record.key, options, access_memo, parents)
                buffered = False
                if 'fulltext' not in options:
                    basis_parent = parents[0]
                    # Note that pack backed knits don't need to buffer here
                    # because they buffer all writes to the transaction level,
                    # but we don't expose that difference at the index level. If
                    # the query here has sufficient cost to show up in
                    # profiling we should do that.
                    if basis_parent not in self.get_parent_map([basis_parent]):
                        pending = buffered_index_entries.setdefault(
                            basis_parent, [])
                        pending.append(index_entry)
                        buffered = True
                if not buffered:
                    self._index.add_records([index_entry])
            elif record.storage_kind == 'fulltext':
                self.add_lines(record.key, parents,
                    split_lines(record.get_bytes_as('fulltext')))
            else:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                lines = split_lines(adapter.get_bytes(
                    record, record.get_bytes_as(record.storage_kind)))
                try:
                    self.add_lines(record.key, parents, lines)
                except errors.RevisionAlreadyPresent:
                    pass
            # Add any records whose basis parent is now available.
            added_keys = [record.key]
            while added_keys:
                key = added_keys.pop(0)
                if key in buffered_index_entries:
                    index_entries = buffered_index_entries[key]
                    self._index.add_records(index_entries)
                    added_keys.extend(
                        [index_entry[0] for index_entry in index_entries])
                    del buffered_index_entries[key]
        # If there were any deltas which had a missing basis parent, error.
        if buffered_index_entries:
            raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
                self)
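
    # Usage sketch, mirroring one knit store into another (hypothetical
    # 'source_vf' and 'target_vf'); topological ordering keeps delta basis
    # records ahead of their children, so little buffering is needed:
    #
    #   keys = source_vf.keys()
    #   target_vf.insert_record_stream(
    #       source_vf.get_record_stream(keys, 'topological', False))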

    def iter_lines_added_or_present_in_keys(self, keys, pb=None):
        """Iterate over the lines in the versioned files from keys.

        This may return lines from other keys. Each item the returned
        iterator yields is a tuple of a line and a text version that that line
        is present in (not introduced in).

        Ordering of results is in whatever order is most suitable for the
        underlying storage format.

        If a progress bar is supplied, it may be used to indicate progress.
        The caller is responsible for cleaning up progress bars (because this
        is an iterator).

        NOTES:
         * Lines are normalised by the underlying store: they will all have \n
           terminators.
         * Lines are returned in arbitrary order.

        :return: An iterator over (line, key).
        """
        if pb is None:
            pb = progress.DummyProgress()
        keys = set(keys)
        total = len(keys)
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need key, position, length
        key_records = []
        build_details = self._index.get_build_details(keys)
        for key, details in build_details.iteritems():
            if key in keys:
                key_records.append((key, details[0]))
                keys.remove(key)
        records_iter = enumerate(self._read_records_iter(key_records))
        for (key_idx, (key, data, sha_value)) in records_iter:
            pb.update('Walking content.', key_idx, total)
            compression_parent = build_details[key][1]
            if compression_parent is None:
                # fulltext
                line_iterator = self._factory.get_fulltext_content(data)
            else:
                # delta
                line_iterator = self._factory.get_linedelta_content(data)
            # XXX: It might be more efficient to yield (key,
            # line_iterator) in the future. However for now, this is a simpler
            # change to integrate into the rest of the codebase. RBC 20071110
            for line in line_iterator:
                yield line, key
        for source in self._fallback_vfs:
            if not keys:
                break
            source_keys = set()
            for line, key in source.iter_lines_added_or_present_in_keys(keys):
                source_keys.add(key)
                yield line, key
            keys.difference_update(source_keys)
        if keys:
            raise RevisionNotPresent(keys, self.filename)
        pb.update('Walking content.', total, total)

    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3],
                new_content._lines[op[3]:op[4]]))
        return diff_hunks

    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None,
                           left_matching_blocks=None):
        """Merge annotations for content and generate deltas.

        This is done by comparing the annotations based on changes to the text
        and generating a delta on the resulting full texts. If annotations are
        not being created then a simple delta is created.
        """
        if left_matching_blocks is not None:
            delta_seq = diff._PrematchedMatcher(left_matching_blocks)
        else:
            delta_seq = None
        if annotated:
            for parent_key in parents:
                merge_content = self._get_content(parent_key, parent_texts)
                if (parent_key == parents[0] and delta_seq is not None):
                    seq = delta_seq
                else:
                    seq = patiencediff.PatienceSequenceMatcher(
                        None, merge_content.text(), content.text())
                for i, j, n in seq.get_matching_blocks():
                    if n == 0:
                        continue
                    # this copies (origin, text) pairs across to the new
                    # content for any line that matches the last-checked
                    # parent.
                    content._lines[j:j+n] = merge_content._lines[i:i+n]
            # XXX: Robert says the following block is a workaround for a
            # now-fixed bug and it can probably be deleted. -- mbp 20080618
            if content._lines and content._lines[-1][1][-1] != '\n':
                # The copied annotation was from a line without a trailing EOL,
                # reinstate one for the content object, to ensure correct
                # serialization.
                line = content._lines[-1][1] + '\n'
                content._lines[-1] = (content._lines[-1][0], line)
        if delta:
            if delta_seq is None:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = patiencediff.PatienceSequenceMatcher(
                    None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)

    def _parse_record(self, version_id, data):
        """Parse an original format knit record.

        These have the last element of the key only present in the stored data.
        """
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]

    def _parse_record_header(self, key, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream.
            as (stream, header_record)
        """
        df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(key, df.readline())
        except Exception, e:
            raise KnitCorrupt(self,
                "While reading {%s} got %s(%s)"
                % (key, e.__class__.__name__, str(e)))
        return df, rec

    def _parse_record_unchecked(self, data):
        # profiling notes:
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self,
                'incorrect number of lines %s != %s'
                ' for version {%s} %s'
                % (len(record_contents), int(rec[2]),
                   rec[1], record_contents))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self,
                'unexpected version end line %r, wanted %r'
                % (last_line, 'end %s\n' % rec[1]))
        df.close()
        return rec, record_contents

    def _read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read.
        Not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (key, access_memo) entries
        :return: Yields (key, contents, digest) in the order
                 read, not the order requested
        """
        if not records:
            return

        # XXX: This smells wrong, IO may not be getting ordered right.
        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for key, index_memo in needed_records])

        for (key, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(key[-1], data)
            yield key, content, digest

    def _read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected but that's all.

        Each item the iterator yields is (key, bytes, sha1_of_full_text).
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            needed_offsets = [index_memo for key, index_memo
                                           in records]
            raw_records = self._access.get_raw_records(needed_offsets)

        for key, index_memo in records:
            data = raw_records.next()
            # validate the header (note that we can only use the suffix in
            # current knit records).
            df, rec = self._parse_record_header(key, data)
            df.close()
            yield key, data, rec[3]

    def _record_to_data(self, key, digest, lines, dense_lines=None):
        """Convert key, digest, lines into a raw data block.

        :param key: The key of the record. Currently keys are always serialised
            using just the trailing component.
        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000's lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a gzipped byte string containing the record, ready to
            write to disk).
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (key[-1],
                                     len(lines),
                                     digest)],
            dense_lines or lines,
            ["end %s\n" % key[-1]]))
        if type(bytes) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(bytes))
        if lines and lines[-1][-1] != '\n':
            raise ValueError('corrupt lines value %r' % lines)
        compressed_bytes = bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
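
    # Sketch of the uncompressed block built above (assumed example values);
    # the whole block is then gzipped, and _parse_record_unchecked() inverts
    # it:
    #
    #   version rev-1 2 <sha1 hex digest>
    #   first line
    #   second line
    #   end rev-1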

    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self,
                'unexpected number of elements in record header')
        return rec

    def keys(self):
        """See VersionedFiles.keys."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "keys scales with size of history")
        sources = [self._index] + self._fallback_vfs
        result = set()
        for source in sources:
            result.update(source.keys())
        return result


class _KndxIndex(object):
    """Manages knit index files

    The index is kept in memory and read on startup, to enable
    fast lookups of revision information.  The cursor of the index
    file is always pointing to the end, making it easy to append
    entries.

    _cache is a cache for fast mapping from version id to a Index
    object.

    _history is a cache for fast mapping from indexes to version ids.

    The index data format is dictionary compressed when it comes to
    parent references; an index entry may only have parents with a
    lower index number.  As a result, the index is topologically sorted.

    Duplicate entries may be written to the index for a single version id
    if this is done then the latter one completely replaces the former:
    this allows updates to correct version and parent information.
    Note that the two entries may share the delta, and that successive
    annotations and references MUST point to the first entry.

    The index file on disc contains a header, followed by one line per knit
    record. The same revision can be present in an index file more than once.
    The first occurrence gets assigned a sequence number starting from 0.

    The format of a single line is
    REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
    REVISION_ID is a utf8-encoded revision id
    FLAGS is a comma separated list of flags about the record. Values include
        no-eol, line-delta, fulltext.
    BYTE_OFFSET is the ascii representation of the byte offset in the data file
        that the compressed data starts at.
    LENGTH is the ascii representation of the length of the data file.
    PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
        REVISION_ID.
    PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
        revision id already in the knit that is a parent of REVISION_ID.
    The ' :' marker is the end of record marker.

    partial writes:
    when a write is interrupted to the index file, it will result in a line
    that does not end in ' :'. If the ' :' is not present at the end of a line,
    or at the end of the file, then the record that is missing it will be
    ignored by the parser.

    When writing new records to the index file, the data is preceded by '\n'
    to ensure that records always start on new lines even if the last write was
    interrupted. As a result it's normal for the last line in the index to be
    missing a trailing newline. One can be added with no harmful effects.

    :ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
        where prefix is e.g. the (fileid,) for .texts instances or () for
        constant-mapped things like .revisions, and the old state is
        tuple(history_vector, cache_dict).  This is used to prevent having an
        ABI change with the C extension that reads .kndx files.
    """

    HEADER = "# bzr knit index 8\n"
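
    # Illustrative .kndx content (assumed example values, not from a real
    # branch): two records, where rev-2 is a line-delta whose parent is the
    # sequence-0 entry rev-1, which itself has no parents:
    #
    #   # bzr knit index 8
    #   rev-1 fulltext 0 125  :
    #   rev-2 line-delta 125 60 0 :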

    def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
        """Create a _KndxIndex on transport using mapper."""
        self._transport = transport
        self._mapper = mapper
        self._get_scope = get_scope
        self._allow_writes = allow_writes
        self._is_locked = is_locked
        self._reset_cache()
        self.has_graph = True

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        :param records: a list of tuples:
                         (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        paths = {}
        for record in records:
            key = record[0]
            prefix = key[:-1]
            path = self._mapper.map(key) + '.kndx'
            path_keys = paths.setdefault(path, (prefix, []))
            path_keys[1].append(record)
        for path in sorted(paths):
            prefix, path_keys = paths[path]
            self._load_prefixes([prefix])
            lines = []
            orig_history = self._kndx_cache[prefix][1][:]
            orig_cache = self._kndx_cache[prefix][0].copy()

            try:
                for key, options, (_, pos, size), parents in path_keys:
                    if parents is None:
                        # kndx indices cannot be parentless.
                        parents = ()
                    line = "\n%s %s %s %s %s :" % (
                        key[-1], ','.join(options), pos, size,
                        self._dictionary_compress(parents))
                    if type(line) != str:
                        raise AssertionError(
                            'data must be utf8 was %s' % type(line))
                    lines.append(line)
                    self._cache_key(key, options, pos, size, parents)
                if len(orig_history):
                    self._transport.append_bytes(path, ''.join(lines))
                else:
                    self._init_index(path, lines)
            except:
                # If any problems happen, restore the original values and
                # re-raise
                self._kndx_cache[prefix] = (orig_cache, orig_history)
                raise

    def _cache_key(self, key, options, pos, size, parent_keys):
        """Cache a version record in the history array and index cache.

        This is inlined into _load_data for performance. KEEP IN SYNC.
        (It saves 60ms, 25% of the __init__ overhead on local 4000 record
         indexes).
        """
        prefix = key[:-1]
        version_id = key[-1]
        # last-element only for compatibility with the C load_data.
        parents = tuple(parent[-1] for parent in parent_keys)
        for parent in parent_keys:
            if parent[:-1] != prefix:
                raise ValueError("mismatched prefixes for %r, %r" % (
                    key, parent))
        cache, history = self._kndx_cache[prefix]
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in cache:
            index = len(history)
            history.append(version_id)
        else:
            index = cache[version_id][5]
        cache[version_id] = (version_id,
                             options,
                             pos,
                             size,
                             parents,
                             index)

    def check_header(self, fp):
        line = fp.readline()
        if line == '':
            # An empty file can actually be treated as though the file doesn't
            # exist yet.
            raise errors.NoSuchFile(self)
        if line != self.HEADER:
            raise KnitHeaderError(badline=line, filename=self)

    def _check_read(self):
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            self._reset_cache()

    def _check_write_ok(self):
        """Raise if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)
        if self._get_scope() != self._scope:
            self._reset_cache()
        if self._mode != 'w':
            raise errors.ReadOnlyObjectDirtiedError(self)

    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for keys.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:(index_memo, compression_parent, parents,
            record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        prefixes = self._partition_keys(keys)
        parent_map = self.get_parent_map(keys)
        result = {}
        for key in keys:
            if key not in parent_map:
                continue # Ghost
            method = self.get_method(key)
            parents = parent_map[key]
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parents[0]
            noeol = 'no-eol' in self.get_options(key)
            index_memo = self.get_position(key)
            result[key] = (index_memo, compression_parent,
                           parents, (method, noeol))
        return result

    def get_method(self, key):
        """Return compression method of specified key."""
        options = self.get_options(key)
        if 'fulltext' in options:
            return 'fulltext'
        elif 'line-delta' in options:
            return 'line-delta'
        else:
            raise errors.KnitIndexUnknownMethod(self, options)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        try:
            return self._kndx_cache[prefix][0][suffix][1]
        except KeyError:
            raise RevisionNotPresent(key, self)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        # Parse what we need to up front, this potentially trades off I/O
        # locality (.kndx and .knit in the same block group for the same file
        # id) for less checking in inner loops.
        prefixes = set(key[:-1] for key in keys)
        self._load_prefixes(prefixes)
        result = {}
        for key in keys:
            prefix = key[:-1]
            try:
                suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
            except KeyError:
                pass
            else:
                result[key] = tuple(prefix + (suffix,) for
                                    suffix in suffix_parents)
        return result

    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (key, data position, size) to hand to the access
            logic to get the record.
        """
        prefix, suffix = self._split_key(key)
        self._load_prefixes([prefix])
        entry = self._kndx_cache[prefix][0][suffix]
        return key, entry[2], entry[3]

    def _init_index(self, path, extra_lines=[]):
        """Initialize an index."""
        sio = StringIO()
        sio.write(self.HEADER)
        sio.writelines(extra_lines)
        sio.seek(0)
        self._transport.put_file_non_atomic(path, sio,
            create_parent_dir=True)
            # self._create_parent_dir)
            # mode=self._file_mode,
            # dir_mode=self._dir_mode)

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        result = set()
        # Identify all key prefixes.
        # XXX: A bit hacky, needs polish.
        if type(self._mapper) == ConstantMapper:
            prefixes = [()]
        else:
            relpaths = set()
            for quoted_relpath in self._transport.iter_files_recursive():
                path, ext = os.path.splitext(quoted_relpath)
                relpaths.add(path)
            prefixes = [self._mapper.unmap(path) for path in relpaths]
        self._load_prefixes(prefixes)
        for prefix in prefixes:
            for suffix in self._kndx_cache[prefix][1]:
                result.add(prefix + (suffix,))
        return result

    def _load_prefixes(self, prefixes):
        """Load the indices for prefixes."""
        self._check_read()
        for prefix in prefixes:
            if prefix not in self._kndx_cache:
                # the load_data interface writes to these variables.
                self._cache = {}
                self._history = []
                self._filename = prefix
                try:
                    path = self._mapper.map(prefix) + '.kndx'
                    fp = self._transport.get(path)
                    try:
                        # _load_data may raise NoSuchFile if the target knit is
                        # completely empty.
                        _load_data(self, fp)
                    finally:
                        fp.close()
                    self._kndx_cache[prefix] = (self._cache, self._history)
                    del self._cache
                    del self._filename
                    del self._history
                except errors.NoSuchFile:
                    self._kndx_cache[prefix] = ({}, [])
                    if type(self._mapper) == ConstantMapper:
                        # preserve behaviour for revisions.kndx etc.
                        self._init_index(path)
                    del self._cache
                    del self._filename
                    del self._history

    def _partition_keys(self, keys):
        """Turn keys into a dict of prefix:suffix_list."""
        result = {}
        for key in keys:
            prefix_keys = result.setdefault(key[:-1], [])
            prefix_keys.append(key[-1])
        return result

    def _dictionary_compress(self, keys):
        """Dictionary compress keys.

        :param keys: The keys to generate references to.
        :return: A string representation of keys. Keys which are present are
            dictionary compressed, and others are emitted as fulltext with a
            '.' prefix.
        """
        if not keys:
            return ''
        result_list = []
        prefix = keys[0][:-1]
        cache = self._kndx_cache[prefix][0]
        for key in keys:
            if key[:-1] != prefix:
                # kndx indices cannot refer across partitioned storage.
                raise ValueError("mismatched prefixes for %r" % keys)
            if key[-1] in cache:
                # -- inlined lookup() --
                result_list.append(str(cache[key[-1]][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + key[-1])
        return ' '.join(result_list)
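
    # Example (hypothetical cache state): if ('f',) is the prefix, 'rev-a' is
    # cached with sequence number 0 and 'rev-ghost' is absent, then
    # _dictionary_compress([('f', 'rev-a'), ('f', 'rev-ghost')]) returns
    # '0 .rev-ghost'.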

    def _reset_cache(self):
        # Possibly this should be a LRU cache. A dictionary from key_prefix to
        # (cache_dict, history_vector) for parsed kndx files.
        self._kndx_cache = {}
        self._scope = self._get_scope()
        allow_writes = self._allow_writes()
        if allow_writes:
            self._mode = 'w'
        else:
            self._mode = 'r'

    def _split_key(self, key):
        """Split key into a prefix and suffix."""
        return key[:-1], key[-1]


class _KnitGraphIndex(object):
    """A KnitVersionedFiles index layered on GraphIndex."""

    def __init__(self, graph_index, is_locked, deltas=False, parents=True,
        add_callback=None):
        """Construct a KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback, returns True if the index is locked and
            thus usable; consulted before answering queries.
        :param deltas: Allow delta-compressed records.
        :param parents: If True, record knits parents, if not do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._deltas = deltas
        self._parents = parents
        if deltas and not parents:
            # XXX: TODO: Delta tree and parent graph should be conceptually
            # separate.
            raise KnitCorrupt(self, "Cannot do delta compression without "
                "parent tracking.")
        self.has_graph = parents
        self._is_locked = is_locked

    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
            (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anymore.
        keys = {}
        for (key, options, access_memo, parents) in records:
            if self._parents:
                parents = tuple(parents)
            index, pos, size = access_memo
            if 'no-eol' in options:
                value = 'N'
            else:
                value = ' '
            value += "%d %d" % (pos, size)
            if not self._deltas:
                if 'line-delta' in options:
                    raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
            if self._parents:
                if self._deltas:
                    if 'line-delta' in options:
                        node_refs = (parents, (parents[0],))
                    else:
                        node_refs = (parents, ())
                else:
                    node_refs = (parents, )
            else:
                if parents:
                    raise KnitCorrupt(self, "attempt to add node with parents "
                        "in parentless index.")
                node_refs = ()
            keys[key] = (value, node_refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if (value[0] != keys[key][0][0] or
                    node_refs != keys[key][1]):
                    raise KnitCorrupt(self, "inconsistent details in add_records"
                        ": %s %s" % ((value, node_refs), keys[key]))
                del keys[key]
        result = []
        if self._parents:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value, node_refs))
        else:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value))
        self._add_callback(result)
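
    # node_refs shapes produced above, by index configuration:
    #   deltas and parents: (parents, (compression_parent,)) or (parents, ())
    #   parents only:       (parents,)
    #   parentless:         ()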

    def _check_read(self):
        """Raise if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        if len(compression_parents) != 1:
            raise AssertionError(
                "Too many compression parents: %r" % compression_parents)
        return compression_parents[0]

    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys, False)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = ()
            else:
                parents = entry[3][0]
            if not self._deltas:
                compression_parent_key = None
            else:
                compression_parent_key = self._compression_parent(entry)
            noeol = (entry[2][0] == 'N')
            if compression_parent_key:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[key] = (self._node_to_position(entry),
                           compression_parent_key, parents,
                           (method, noeol))
        return result

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)

    def get_method(self, key):
        """Return compression method of specified key."""
        return self._get_method(self._get_node(key))

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'

    def _get_node(self, key):
        try:
            return list(self._get_entries([key]))[0]
        except IndexError:
            raise RevisionNotPresent(key, self)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(key)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(key)
        return self._node_to_position(node)

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
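
    # Example (hypothetical value): a node whose value string is 'N863 419'
    # decodes to (node[0], 863, 419); the leading flag byte ('N' for no-eol,
    # ' ' otherwise) is written by add_records and skipped here.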


class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is appended to the relevant .knit file for each key, one
        write per key.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        # TODO: This can be tuned for writing to sftp and other servers where
        # append() is relatively expensive by grouping the writes to each key
        # prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            try:
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            except errors.NoSuchFile:
                self._transport.mkdir(osutils.dirname(path))
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            offset += size
            result.append((key, base, size))
        return result

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the access memo for
            retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-prefix requests to minimise readv's
        # issued.
        request_lists = []
        current_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if current_prefix == key[:-1]:
                current_list.append((offset, length))
            else:
                if current_prefix is not None:
                    request_lists.append((current_prefix, current_list))
                current_prefix = key[:-1]
                current_list = [(offset, length)]
        # handle the last entry
        if current_prefix is not None:
            request_lists.append((current_prefix, current_list))
        for prefix, read_vector in request_lists:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data
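
    # Example (hypothetical memos): runs of memos sharing a key prefix are
    # coalesced into a single readv, so
    #   [(k1, 0, 10), (k2, 10, 5), (k3, 0, 7)]
    # with k1 and k2 under one prefix becomes
    #   [(prefix1, [(0, 10), (10, 5)]), (prefix2, [(0, 7)])].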


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
2353
def add_raw_records(self, key_sizes, raw_data):
2354
"""Add raw knit bytes to a storage area.
2356
The data is spooled to the container writer in one bytes-record per
2359
:param sizes: An iterable of tuples containing the key and size of each
2361
:param raw_data: A bytestring containing the data.
2362
:return: A list of memos to retrieve the record later. Each memo is an
2363
opaque index memo. For _DirectPackAccess the memo is (index, pos,
2364
length), where the index field is the write_index object supplied
2365
to the PackAccess object.
2367
if type(raw_data) != str:
2368
raise AssertionError(
2369
'data must be plain bytes was %s' % type(raw_data))
2372
for key, size in key_sizes:
2373
p_offset, p_length = self._container_writer.add_bytes_record(
2374
raw_data[offset:offset+size], [])
2376
result.append((self._write_index, p_offset, p_length))
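
    # Sketch (hypothetical sizes): adding two raw records of 10 and 7 bytes
    # yields [(write_index, p_off1, p_len1), (write_index, p_off2, p_len2)],
    # with the offsets and lengths reported by the container writer for each
    # bytes-record.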

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            transport, path = self._indices[index]
            reader = pack.make_readv_reader(transport, path, offsets)
            for names, read_func in reader.iter_records():
                yield read_func(None)

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index


# Deprecated, use PatienceSequenceMatcher instead
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher


def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.

    This implementation is for knits with no cached annotations. It will
    work for knits with cached annotations, but this is not recommended.
    """
    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate(revision_id))
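
# A minimal usage sketch (hypothetical knit object and revision id); the
# iterator yields (origin, line) pairs for the annotated fulltext:
#
#   for origin, line in annotate_knit(knit_vf, 'rev-id-1'):
#       sys.stdout.write('%s | %s' % (origin, line))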


class _KnitAnnotator(object):
    """Build up the annotations for a text."""

    def __init__(self, knit):
        self._knit = knit

        # Content objects; these differ from fulltexts because of how final
        # newlines are treated by knits. The content objects here will always
        # have a final newline.
        self._fulltext_contents = {}

        # Annotated lines of specific revisions
        self._annotated_lines = {}

        # Track the raw data for nodes that we could not process yet.
        # This maps the revision_id of the base to a list of children that
        # will be annotated from it.
        self._pending_children = {}

        # Nodes which cannot be extracted
        self._ghosts = set()

        # Track how many children this node has, so we know if we need to keep
        # it annotated.
        self._annotate_children = {}
        self._compression_children = {}

        self._all_build_details = {}
        # The children => parent revision_id graph
        self._revision_id_graph = {}

        self._heads_provider = None

        self._nodes_to_keep_annotations = set()
        self._generations_until_keep = 100

    def set_generations_until_keep(self, value):
        """Set the number of generations before caching a node.

        Setting this to -1 will cache every merge node, setting this higher
        will cache fewer nodes.
        """
        self._generations_until_keep = value
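
    # E.g. with the default of 100, a merge node's annotations are pinned at
    # most once per 100 graph generations walked in _get_build_graph, while
    # -1 pins every merge node (more memory, less recomputation).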

    def _add_fulltext_content(self, revision_id, content_obj):
        self._fulltext_contents[revision_id] = content_obj
        # TODO: jam 20080305 It might be good to check the sha1digest here
        return content_obj.text()

    def _check_parents(self, child, nodes_to_annotate):
        """Check if all parents have been processed.

        :param child: A tuple of (rev_id, parents, raw_content)
        :param nodes_to_annotate: If child is ready, add it to
            nodes_to_annotate, otherwise put it back in self._pending_children
        """
        for parent_id in child[1]:
            if parent_id not in self._annotated_lines:
                # This parent has not been annotated yet; queue the child
                # until it is.
                self._pending_children.setdefault(parent_id,
                                                  []).append(child)
                break
        else:
            # All parents are present, so this one is ready to be processed
            nodes_to_annotate.append(child)

    def _add_annotation(self, revision_id, fulltext, parent_ids,
                        left_matching_blocks=None):
        """Add an annotation entry.

        All parents should already have been annotated.
        :return: A list of children that now have their parents satisfied.
        """
        a = self._annotated_lines
        annotated_parent_lines = [a[p] for p in parent_ids]
        annotated_lines = list(annotate.reannotate(annotated_parent_lines,
            fulltext, revision_id, left_matching_blocks,
            heads_provider=self._get_heads_provider()))
        self._annotated_lines[revision_id] = annotated_lines
        for p in parent_ids:
            ann_children = self._annotate_children[p]
            ann_children.remove(revision_id)
            if (not ann_children
                and p not in self._nodes_to_keep_annotations):
                del self._annotated_lines[p]
                del self._all_build_details[p]
                if p in self._fulltext_contents:
                    del self._fulltext_contents[p]
        # Now that we've added this one, see if there are any pending
        # deltas to be done; certainly this parent is finished.
        nodes_to_annotate = []
        for child in self._pending_children.pop(revision_id, []):
            self._check_parents(child, nodes_to_annotate)
        return nodes_to_annotate

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different from the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate
        the fulltext.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        if key in self._annotated_lines:
            # Nothing to do
            return []
        pending = set([key])
        records = []
        generation = 0
        kept_generation = 0
        while pending:
            # get all pending nodes
            generation += 1
            this_iteration = pending
            build_details = self._knit._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._knit._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                self._revision_id_graph[key] = parents
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update(p for p in parents
                                 if p not in self._all_build_details)
                if compression_parent:
                    self._compression_children.setdefault(compression_parent,
                        []).append(key)
                if parents:
                    for parent in parents:
                        self._annotate_children.setdefault(parent,
                            []).append(key)
                    num_gens = generation - kept_generation
                    if ((num_gens >= self._generations_until_keep)
                        and len(parents) > 1):
                        kept_generation = generation
                        self._nodes_to_keep_annotations.add(key)

            missing_versions = this_iteration.difference(build_details.keys())
            self._ghosts.update(missing_versions)
            for missing_version in missing_versions:
                # add a key, no parents
                self._revision_id_graph[missing_version] = ()
                pending.discard(missing_version) # don't look for it
        if self._ghosts.intersection(self._compression_children):
            raise KnitCorrupt(self._knit,
                "We cannot have nodes which have a ghost compression parent:\n"
                "ghosts: %r\n"
                "compression children: %r"
                % (self._ghosts, self._compression_children))
        # Cleanout anything that depends on a ghost so that we don't wait for
        # the ghost to show up.
        for node in self._ghosts:
            if node in self._annotate_children:
                # We won't be building this node
                del self._annotate_children[node]
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records

    def _annotate_records(self, records):
        """Build the annotations for the listed records."""
        # We iterate in the order read, rather than a strict order requested.
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        for (rev_id, record,
             digest) in self._knit._read_records_iter(records):
            if rev_id in self._annotated_lines:
                continue
            parent_ids = self._revision_id_graph[rev_id]
            parent_ids = [p for p in parent_ids if p not in self._ghosts]
            details = self._all_build_details[rev_id]
            (index_memo, compression_parent, parents,
             record_details) = details
            nodes_to_annotate = []
            # TODO: Remove the punning between compression parents, and
            #       parent_ids, we should be able to do this without assuming
            #       the fulltext parent is also the compression parent.
            if len(parent_ids) == 0:
                # There are no parents for this node, so just add it
                # TODO: This probably needs to be decoupled
                fulltext_content, delta = self._knit._factory.parse_record(
                    rev_id, record, record_details, None)
                fulltext = self._add_fulltext_content(rev_id, fulltext_content)
                nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
                    parent_ids, left_matching_blocks=None))
            else:
                child = (rev_id, parent_ids, record)
                # Check if all the parents are present
                self._check_parents(child, nodes_to_annotate)
            while nodes_to_annotate:
                # Should we use a queue here instead of a stack?
                (rev_id, parent_ids, record) = nodes_to_annotate.pop()
                (index_memo, compression_parent, parents,
                 record_details) = self._all_build_details[rev_id]
                if compression_parent is not None:
                    comp_children = self._compression_children[compression_parent]
                    if rev_id not in comp_children:
                        raise AssertionError("%r not in compression children %r"
                            % (rev_id, comp_children))
                    # If there is only 1 child, it is safe to reuse this
                    # content.
                    reuse_content = (len(comp_children) == 1
                        and compression_parent not in
                            self._nodes_to_keep_annotations)
                    if reuse_content:
                        # Remove it from the cache since it will be changing.
                        parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
                        # Make sure to copy the fulltext since it might be
                        # modified.
                        parent_fulltext = list(parent_fulltext_content.text())
                    else:
                        parent_fulltext_content = self._fulltext_contents[compression_parent]
                        parent_fulltext = parent_fulltext_content.text()
                    comp_children.remove(rev_id)
                    fulltext_content, delta = self._knit._factory.parse_record(
                        rev_id, record, record_details,
                        parent_fulltext_content,
                        copy_base_content=(not reuse_content))
                    fulltext = self._add_fulltext_content(rev_id,
                                                          fulltext_content)
                    blocks = KnitContent.get_line_delta_blocks(delta,
                            parent_fulltext, fulltext)
                else:
                    fulltext_content = self._knit._factory.parse_fulltext(
                        record, rev_id)
                    fulltext = self._add_fulltext_content(rev_id,
                                                          fulltext_content)
                    blocks = None
                nodes_to_annotate.extend(
                    self._add_annotation(rev_id, fulltext, parent_ids,
                                         left_matching_blocks=blocks))

    def _get_heads_provider(self):
        """Create a heads provider for resolving ancestry issues."""
        if self._heads_provider is not None:
            return self._heads_provider
        parent_provider = _mod_graph.DictParentsProvider(
            self._revision_id_graph)
        graph_obj = _mod_graph.Graph(parent_provider)
        head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
        self._heads_provider = head_cache
        return head_cache

    def annotate(self, key):
        """Return the annotated fulltext at the given key.

        :param key: The key to annotate.
        """
        records = self._get_build_graph(key)
        if key in self._ghosts:
            raise errors.RevisionNotPresent(key, self._knit)
        self._annotate_records(records)
        return self._annotated_lines[key]


try:
    from bzrlib._knit_load_data_c import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data