# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Knit versionedfile implementation.

A knit is a versioned file implementation that supports efficient append only
writes.

lifeless: the data file is made up of "delta records". each delta record has a delta header
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
the -expanded data- (ie, the delta applied to the parent). the delta also ends with a
end-marker; simply "end VERSION"

delta can be line or full contents.
... the 8's there are the index number of the annotation.
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
8 e.set('executable', 'yes')
8 if elt.get('executable') == 'yes':
8 ie.executable = True
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad
09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
09:33 < lifeless> right
09:33 < jrydberg> lifeless: the position and size is the range in the data file

so the index sequence is the dictionary compressed sequence number used
in the deltas to provide line annotation
"""

# 10:16 < lifeless> make partial index writes safe
# 10:16 < lifeless> implement 'knit.check()' like weave.check()
# 10:17 < lifeless> record known ghosts so we can detect when they are filled in rather than the current 'reweave'
# move sha1 out of the content so that join is faster at verifying parents
# record content length ?
from cStringIO import StringIO
from itertools import izip, chain
from zlib import Z_DEFAULT_COMPRESSION

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib.errors import (
    RevisionAlreadyPresent,
from bzrlib.graph import Graph
from bzrlib.osutils import (
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile, bytes_to_gzip
from bzrlib.versionedfile import (
    AbsentContentFactory,
    FulltextContentFactory,

# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located. we may need to
# bypass python file buffering.


DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'


class KnitAdapter(object):
    """Base class for knit record adaptation."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = KnitVersionedFiles(None, None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf


class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, bytes = self._data._record_to_data((rec[1],), rec[3], content.text())
        return bytes


class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, bytes = self._data._record_to_data((rec[1],), rec[3], contents)
        return bytes


class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to plain fulltexts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter for deltas from annotated knits to plain fulltexts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0]
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())


class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to plain fulltexts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[-1],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaPlainToFullText(KnitAdapter):
    """An adapter for plain deltas to plain fulltexts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0]
        # XXX: string splitting overhead.
        basis_entry = self._basis_vf.get_record_stream(
            [compression_parent], 'unordered', True).next()
        if basis_entry.storage_kind == 'absent':
            raise errors.RevisionNotPresent(compression_parent, self._basis_vf)
        basis_lines = split_lines(basis_entry.get_bytes_as('fulltext'))
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())
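
# Illustrative sketch (not part of the original module): using one of the
# adapters above to turn a raw knit delta record into a fulltext. The names
# source_vf, basis_vf and keys are assumed, pre-existing KnitVersionedFiles
# instances and a key list respectively.
#
#   adapter = DeltaPlainToFullText(basis_vf)
#   for record in source_vf.get_record_stream(keys, 'topological', False):
#       if record.storage_kind == 'knit-delta-gz':
#           text = adapter.get_bytes(
#               record, record.get_bytes_as(record.storage_kind))
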
250
class KnitContentFactory(ContentFactory):
251
"""Content factory for streaming from knits.
253
:seealso ContentFactory:
256
def __init__(self, key, parents, build_details, sha1, raw_record,
257
annotated, knit=None):
258
"""Create a KnitContentFactory for key.
261
:param parents: The parents.
262
:param build_details: The build details as returned from
264
:param sha1: The sha1 expected from the full text of this object.
265
:param raw_record: The bytes of the knit data from disk.
266
:param annotated: True if the raw data is annotated.
268
ContentFactory.__init__(self)
271
self.parents = parents
272
if build_details[0] == 'line-delta':
277
annotated_kind = 'annotated-'
280
self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
281
self._raw_record = raw_record
282
self._build_details = build_details
285
def get_bytes_as(self, storage_kind):
286
if storage_kind == self.storage_kind:
287
return self._raw_record
288
if storage_kind == 'fulltext' and self._knit is not None:
289
return self._knit.get_text(self.key[0])
291
raise errors.UnavailableRepresentation(self.key, storage_kind,
295
class KnitContent(object):
296
"""Content of a knit version to which deltas can be applied.
298
This is always stored in memory as a list of lines with \n at the end,
299
plus a flag saying if the final ending is really there or not, because that
300
corresponds to the on-disk knit representation.
304
self._should_strip_eol = False
306
def apply_delta(self, delta, new_version_id):
307
"""Apply delta to this object to become new_version_id."""
308
raise NotImplementedError(self.apply_delta)
310
def line_delta_iter(self, new_lines):
311
"""Generate line-based delta from this content to new_lines."""
312
new_texts = new_lines.text()
313
old_texts = self.text()
314
s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
315
for tag, i1, i2, j1, j2 in s.get_opcodes():
318
# ofrom, oto, length, data
319
yield i1, i2, j2 - j1, new_lines._lines[j1:j2]
321
def line_delta(self, new_lines):
322
return list(self.line_delta_iter(new_lines))
325
def get_line_delta_blocks(knit_delta, source, target):
326
"""Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
327
target_len = len(target)
330
for s_begin, s_end, t_len, new_text in knit_delta:
331
true_n = s_begin - s_pos
334
# knit deltas do not provide reliable info about whether the
335
# last line of a file matches, due to eol handling.
336
if source[s_pos + n -1] != target[t_pos + n -1]:
339
yield s_pos, t_pos, n
340
t_pos += t_len + true_n
342
n = target_len - t_pos
344
if source[s_pos + n -1] != target[t_pos + n -1]:
347
yield s_pos, t_pos, n
348
yield s_pos + (target_len - t_pos), target_len, 0
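
# Illustrative example (an assumption, not original code): the in-memory shape
# of a knit line-delta. Each hunk means "replace old lines [start:end) with
# these new lines"; apply_delta() on the content classes below consumes
# exactly this structure.
#
#   delta = [(1, 3, 1, ['replacement line\n'])]
#   content = PlainKnitContent(['a\n', 'b\n', 'c\n', 'd\n'], 'rev-1')
#   content.apply_delta(delta, 'rev-2')
#   # content.text() == ['a\n', 'replacement line\n', 'd\n']
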
351
class AnnotatedKnitContent(KnitContent):
352
"""Annotated content."""
354
def __init__(self, lines):
355
KnitContent.__init__(self)
359
"""Return a list of (origin, text) for each content line."""
360
lines = self._lines[:]
361
if self._should_strip_eol:
362
origin, last_line = lines[-1]
363
lines[-1] = (origin, last_line.rstrip('\n'))
366
def apply_delta(self, delta, new_version_id):
367
"""Apply delta to this object to become new_version_id."""
370
for start, end, count, delta_lines in delta:
371
lines[offset+start:offset+end] = delta_lines
372
offset = offset + (start - end) + count
376
lines = [text for origin, text in self._lines]
377
except ValueError, e:
378
# most commonly (only?) caused by the internal form of the knit
379
# missing annotation information because of a bug - see thread
381
raise KnitCorrupt(self,
382
"line in annotated knit missing annotation information: %s"
384
if self._should_strip_eol:
385
lines[-1] = lines[-1].rstrip('\n')
389
return AnnotatedKnitContent(self._lines[:])
392
class PlainKnitContent(KnitContent):
393
"""Unannotated content.
395
When annotate[_iter] is called on this content, the same version is reported
396
for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
400
def __init__(self, lines, version_id):
401
KnitContent.__init__(self)
403
self._version_id = version_id
406
"""Return a list of (origin, text) for each content line."""
407
return [(self._version_id, line) for line in self._lines]
409
def apply_delta(self, delta, new_version_id):
410
"""Apply delta to this object to become new_version_id."""
413
for start, end, count, delta_lines in delta:
414
lines[offset+start:offset+end] = delta_lines
415
offset = offset + (start - end) + count
416
self._version_id = new_version_id
419
return PlainKnitContent(self._lines[:], self._version_id)
423
if self._should_strip_eol:
425
lines[-1] = lines[-1].rstrip('\n')
429
class _KnitFactory(object):
430
"""Base class for common Factory functions."""
432
def parse_record(self, version_id, record, record_details,
433
base_content, copy_base_content=True):
434
"""Parse a record into a full content object.
436
:param version_id: The official version id for this content
437
:param record: The data returned by read_records_iter()
438
:param record_details: Details about the record returned by
440
:param base_content: If get_build_details returns a compression_parent,
441
you must return a base_content here, else use None
442
:param copy_base_content: When building from the base_content, decide
443
you can either copy it and return a new object, or modify it in
445
:return: (content, delta) A Content object and possibly a line-delta,
448
method, noeol = record_details
449
if method == 'line-delta':
450
if copy_base_content:
451
content = base_content.copy()
453
content = base_content
454
delta = self.parse_line_delta(record, version_id)
455
content.apply_delta(delta, version_id)
457
content = self.parse_fulltext(record, version_id)
459
content._should_strip_eol = noeol
460
return (content, delta)
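
# Illustrative sketch (assumption): how callers drive parse_record. For a
# 'line-delta' record the parsed basis content must be supplied; a 'fulltext'
# record needs no basis. record_lines and base_content are hypothetical names.
#
#   factory = KnitAnnotateFactory()
#   content, delta = factory.parse_record('rev-2', record_lines,
#       ('line-delta', False), base_content, copy_base_content=True)
#   text = content.text()
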
463
class KnitAnnotateFactory(_KnitFactory):
464
"""Factory for creating annotated Content objects."""
468
def make(self, lines, version_id):
469
num_lines = len(lines)
470
return AnnotatedKnitContent(zip([version_id] * num_lines, lines))
472
def parse_fulltext(self, content, version_id):
473
"""Convert fulltext to internal representation
475
fulltext content is of the format
476
revid(utf8) plaintext\n
477
internal representation is of the format:
480
# TODO: jam 20070209 The tests expect this to be returned as tuples,
481
# but the code itself doesn't really depend on that.
482
# Figure out a way to not require the overhead of turning the
483
# list back into tuples.
484
lines = [tuple(line.split(' ', 1)) for line in content]
485
return AnnotatedKnitContent(lines)
487
def parse_line_delta_iter(self, lines):
488
return iter(self.parse_line_delta(lines))
490
def parse_line_delta(self, lines, version_id, plain=False):
491
"""Convert a line based delta into internal representation.
493
line delta is in the form of:
494
intstart intend intcount
496
revid(utf8) newline\n
497
internal representation is
498
(start, end, count, [1..count tuples (revid, newline)])
500
:param plain: If True, the lines are returned as a plain
501
list without annotations, not as a list of (origin, content) tuples, i.e.
502
(start, end, count, [1..count newline])
509
def cache_and_return(line):
510
origin, text = line.split(' ', 1)
511
return cache.setdefault(origin, origin), text
513
# walk through the lines parsing.
514
# Note that the plain test is explicitly pulled out of the
515
# loop to minimise any performance impact
518
start, end, count = [int(n) for n in header.split(',')]
519
contents = [next().split(' ', 1)[1] for i in xrange(count)]
520
result.append((start, end, count, contents))
523
start, end, count = [int(n) for n in header.split(',')]
524
contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
525
result.append((start, end, count, contents))
528
def get_fulltext_content(self, lines):
529
"""Extract just the content lines from a fulltext."""
530
return (line.split(' ', 1)[1] for line in lines)
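
# Illustrative example (assumption): one serialised line of an annotated knit
# fulltext and the two ways of parsing it.
#
#   line = 'rev-joe-1 print "hello"\n'
#   # parse_fulltext -> AnnotatedKnitContent line ('rev-joe-1', 'print "hello"\n')
#   # get_fulltext_content -> just 'print "hello"\n'
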
532
def get_linedelta_content(self, lines):
533
"""Extract just the content from a line delta.
535
This doesn't return all of the extra information stored in a delta.
536
Only the actual content lines.
541
header = header.split(',')
542
count = int(header[2])
543
for i in xrange(count):
544
origin, text = next().split(' ', 1)
547
def lower_fulltext(self, content):
548
"""convert a fulltext content record into a serializable form.
550
see parse_fulltext which this inverts.
552
# TODO: jam 20070209 We only do the caching thing to make sure that
553
# the origin is a valid utf-8 line, eventually we could remove it
554
return ['%s %s' % (o, t) for o, t in content._lines]
556
def lower_line_delta(self, delta):
557
"""convert a delta into a serializable form.
559
See parse_line_delta which this inverts.
561
# TODO: jam 20070209 We only do the caching thing to make sure that
562
# the origin is a valid utf-8 line, eventually we could remove it
564
for start, end, c, lines in delta:
565
out.append('%d,%d,%d\n' % (start, end, c))
566
out.extend(origin + ' ' + text
567
for origin, text in lines)
570
def annotate(self, knit, key):
571
content = knit._get_content(key)
572
# adjust for the fact that serialised annotations are only key suffixes
574
if type(key) == tuple:
576
origins = content.annotate()
578
for origin, line in origins:
579
result.append((prefix + (origin,), line))
582
return content.annotate()
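
# Illustrative example (assumption): the serialised form of one annotated
# line-delta hunk, as written by lower_line_delta and read back by
# parse_line_delta: a "start,end,count" header followed by count annotated
# lines.
#
#   1,3,1
#   rev-joe-2 replacement line
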
585
class KnitPlainFactory(_KnitFactory):
586
"""Factory for creating plain Content objects."""
590
def make(self, lines, version_id):
591
return PlainKnitContent(lines, version_id)
593
def parse_fulltext(self, content, version_id):
594
"""This parses an unannotated fulltext.
596
Note that this is not a noop - the internal representation
597
has (versionid, line) - it's just a constant versionid.
599
return self.make(content, version_id)
601
def parse_line_delta_iter(self, lines, version_id):
603
num_lines = len(lines)
604
while cur < num_lines:
607
start, end, c = [int(n) for n in header.split(',')]
608
yield start, end, c, lines[cur:cur+c]
611
def parse_line_delta(self, lines, version_id):
612
return list(self.parse_line_delta_iter(lines, version_id))
614
def get_fulltext_content(self, lines):
615
"""Extract just the content lines from a fulltext."""
618
def get_linedelta_content(self, lines):
619
"""Extract just the content from a line delta.
621
This doesn't return all of the extra information stored in a delta.
622
Only the actual content lines.
627
header = header.split(',')
628
count = int(header[2])
629
for i in xrange(count):
632
def lower_fulltext(self, content):
633
return content.text()
635
def lower_line_delta(self, delta):
637
for start, end, c, lines in delta:
638
out.append('%d,%d,%d\n' % (start, end, c))
642
def annotate(self, knit, key):
643
annotator = _KnitAnnotator(knit)
644
return annotator.annotate(key)
648
def make_file_factory(annotated, mapper):
649
"""Create a factory for creating a file based KnitVersionedFiles.
651
:param annotated: knit annotations are wanted.
652
:param mapper: The mapper from keys to paths.
654
def factory(transport):
655
index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
656
access = _KnitKeyAccess(transport, mapper)
657
return KnitVersionedFiles(index, access, annotated=annotated)
661
def make_pack_factory(graph, delta, keylength):
662
"""Create a factory for creating a pack based VersionedFiles.
664
This is only functional enough to run interface tests, it doesn't try to
665
provide a full pack environment.
667
:param graph: Store a graph.
668
:param delta: Delta compress contents.
669
:param keylength: How long should keys be.
671
def factory(transport):
672
parents = graph or delta
678
max_delta_chain = 200
681
graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
682
key_elements=keylength)
683
stream = transport.open_write_stream('newpack')
684
writer = pack.ContainerWriter(stream.write)
686
index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
687
deltas=delta, add_callback=graph_index.add_nodes)
688
access = _DirectPackAccess({})
689
access.set_writer(writer, graph_index, (transport, 'newpack'))
690
result = KnitVersionedFiles(index, access,
691
max_delta_chain=max_delta_chain)
692
result.stream = stream
693
result.writer = writer
698
def cleanup_pack_knit(versioned_files):
699
versioned_files.stream.close()
700
versioned_files.writer.end()
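
# Illustrative usage sketch (not part of the module): building a file-backed
# knit on a transport and adding two texts. get_transport and the '.' URL are
# assumptions for the example.
#
#   from bzrlib.transport import get_transport
#   t = get_transport('.')
#   knit_vf = make_file_factory(True, ConstantMapper('example'))(t)
#   knit_vf.add_lines(('rev-1',), (), ['hello\n', 'world\n'])
#   knit_vf.add_lines(('rev-2',), (('rev-1',),),
#       ['hello\n', 'there\n', 'world\n'])
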
703
class KnitVersionedFiles(VersionedFiles):
704
"""Storage for many versioned files using knit compression.
706
Backend storage is managed by indices and data objects.
709
def __init__(self, index, data_access, max_delta_chain=200,
711
"""Create a KnitVersionedFiles with index and data_access.
713
:param index: The index for the knit data.
714
:param data_access: The access object to store and retrieve knit
716
:param max_delta_chain: The maximum number of deltas to permit during
717
insertion. Set to 0 to prohibit the use of deltas.
718
:param annotated: Set to True to cause annotations to be calculated and
719
stored during insertion.
722
self._access = data_access
723
self._max_delta_chain = max_delta_chain
725
self._factory = KnitAnnotateFactory()
727
self._factory = KnitPlainFactory()
729
def add_lines(self, key, parents, lines, parent_texts=None,
730
left_matching_blocks=None, nostore_sha=None, random_id=False,
732
"""See VersionedFiles.add_lines()."""
733
self._index._check_write_ok()
734
self._check_add(key, lines, random_id, check_content)
736
# For no-graph knits, have the public interface use None for
739
return self._add(key, lines, parents,
740
parent_texts, left_matching_blocks, nostore_sha, random_id)
742
def _add(self, key, lines, parents, parent_texts,
743
left_matching_blocks, nostore_sha, random_id):
744
"""Add a set of lines on top of version specified by parents.
746
Any versions not present will be converted into ghosts.
748
# first thing, if the content is something we don't need to store, find
750
line_bytes = ''.join(lines)
751
digest = sha_string(line_bytes)
752
if nostore_sha == digest:
753
raise errors.ExistingContent
756
if parent_texts is None:
758
# Do a single query to ascertain parent presence.
759
present_parent_map = self.get_parent_map(parents)
760
for parent in parents:
761
if parent in present_parent_map:
762
present_parents.append(parent)
764
# Currently we can only compress against the left most present parent.
765
if (len(present_parents) == 0 or
766
present_parents[0] != parents[0]):
769
# To speed the extract of texts the delta chain is limited
770
# to a fixed number of deltas. This should minimize both
771
# I/O and the time spent applying deltas.
772
delta = self._check_should_delta(present_parents[0])
774
text_length = len(line_bytes)
777
if lines[-1][-1] != '\n':
778
# copy the contents of lines.
780
options.append('no-eol')
781
lines[-1] = lines[-1] + '\n'
785
if type(element) != str:
786
raise TypeError("key contains non-strings: %r" % (key,))
787
# Knit hunks are still last-element only
789
content = self._factory.make(lines, version_id)
790
if 'no-eol' in options:
791
# Hint to the content object that its text() call should strip the
793
content._should_strip_eol = True
794
if delta or (self._factory.annotated and len(present_parents) > 0):
795
# Merge annotations from parent texts if needed.
796
delta_hunks = self._merge_annotations(content, present_parents,
797
parent_texts, delta, self._factory.annotated,
798
left_matching_blocks)
801
options.append('line-delta')
802
store_lines = self._factory.lower_line_delta(delta_hunks)
803
size, bytes = self._record_to_data(key, digest,
806
options.append('fulltext')
807
# isinstance is slower and we have no hierarchy.
808
if self._factory.__class__ == KnitPlainFactory:
809
# Use the already joined bytes saving iteration time in
811
size, bytes = self._record_to_data(key, digest,
814
# get mixed annotation + content and feed it into the
816
store_lines = self._factory.lower_fulltext(content)
817
size, bytes = self._record_to_data(key, digest,
820
access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
821
self._index.add_records(
822
((key, options, access_memo, parents),),
824
return digest, text_length, content
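
# Illustrative sketch (assumption): what add_lines/_add hand back to callers -
# the sha1 of the stored text, its length in bytes, and the content object.
#
#   sha1, length, _ = knit_vf.add_lines(('rev-3',), (('rev-2',),),
#       ['hello\n', 'there\n'])
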
826
def annotate(self, key):
827
"""See VersionedFiles.annotate."""
828
return self._factory.annotate(self, key)
830
def check(self, progress_bar=None):
831
"""See VersionedFiles.check()."""
832
# This doesn't actually test extraction of everything, but that will
833
# impact 'bzr check' substantially, and needs to be integrated with
834
# care. However, it does check for the obvious problem of a delta with
837
parent_map = self.get_parent_map(keys)
839
if self._index.get_method(key) != 'fulltext':
840
compression_parent = parent_map[key][0]
841
if compression_parent not in parent_map:
842
raise errors.KnitCorrupt(self,
843
"Missing basis parent %s for %s" % (
844
compression_parent, key))
846
def _check_add(self, key, lines, random_id, check_content):
847
"""check that version_id and lines are safe to add."""
848
if contains_whitespace(key[-1]):
849
raise InvalidRevisionId(key[-1], self.filename)
850
self.check_not_reserved_id(key[-1])
851
# Technically this could be avoided if we are happy to allow duplicate
852
# id insertion when other things than bzr core insert texts, but it
853
# seems useful for folk using the knit api directly to have some safety
854
# blanket that we can disable.
855
##if not random_id and self.has_version(key):
856
## raise RevisionAlreadyPresent(key, self)
858
self._check_lines_not_unicode(lines)
859
self._check_lines_are_lines(lines)
861
def _check_header(self, key, line):
862
rec = self._split_header(line)
863
self._check_header_version(rec, key[-1])
866
def _check_header_version(self, rec, version_id):
867
"""Checks the header version on original format knit records.
869
These have the last component of the key embedded in the record.
871
if rec[1] != version_id:
872
raise KnitCorrupt(self,
873
'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
875
def _check_should_delta(self, parent):
876
"""Iterate back through the parent listing, looking for a fulltext.
878
This is used when we want to decide whether to add a delta or a new
879
fulltext. It searches for _max_delta_chain parents. When it finds a
880
fulltext parent, it sees if the total size of the deltas leading up to
881
it is large enough to indicate that we want a new full text anyway.
883
Return True if we should create a new delta, False if we should use a
888
for count in xrange(self._max_delta_chain):
889
# XXX: Collapse these two queries:
890
method = self._index.get_method(parent)
891
index, pos, size = self._index.get_position(parent)
892
if method == 'fulltext':
896
# No exception here because we stop at first fulltext anyway, an
897
# absent parent indicates a corrupt knit anyway.
898
# TODO: This should be asking for compression parent, not graph
900
parent = self._index.get_parent_map([parent])[parent][0]
902
# We couldn't find a fulltext, so we must create a new one
904
# Simple heuristic - if the total I/O would be greater as a delta than
905
# the originally installed fulltext, we create a new fulltext.
906
return fulltext_size > delta_size
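
# Illustrative numbers (assumption): the heuristic above stores a delta only
# while the accumulated delta chain is cheaper to read than the fulltext it
# would rebuild.
#
#   fulltext_size = 10000   # bytes of the nearest fulltext ancestor
#   delta_size = 12000      # bytes of the deltas between it and this text
#   # fulltext_size > delta_size is False, so a fresh fulltext is stored.
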
908
def _build_details_to_components(self, build_details):
909
"""Convert a build_details tuple to a position tuple."""
910
# record_details, access_memo, compression_parent
911
return build_details[3], build_details[0], build_details[1]
913
def _get_components_positions(self, keys, noraise=False):
914
"""Produce a map of position data for the components of keys.
916
This data is intended to be used for retrieving the knit records.
918
A dict of key to (record_details, index_memo, next, parents) is
920
method is the way referenced data should be applied.
921
index_memo is the handle to pass to the data access to actually get the
923
next is the build-parent of the version, or None for fulltexts.
924
parents is the version_ids of the parents of this version
926
:param noraise: If True do not raise an error on a missing component,
930
pending_components = keys
931
while pending_components:
932
build_details = self._index.get_build_details(pending_components)
933
current_components = set(pending_components)
934
pending_components = set()
935
for key, details in build_details.iteritems():
936
(index_memo, compression_parent, parents,
937
record_details) = details
938
method = record_details[0]
939
if compression_parent is not None:
940
pending_components.add(compression_parent)
941
component_data[key] = self._build_details_to_components(details)
942
missing = current_components.difference(build_details)
943
if missing and not noraise:
944
raise errors.RevisionNotPresent(missing.pop(), self)
945
return component_data
947
def _get_content(self, key, parent_texts={}):
948
"""Returns a content object that makes up the specified
950
cached_version = parent_texts.get(key, None)
951
if cached_version is not None:
952
# Ensure the cache dict is valid.
953
if not self.get_parent_map([key]):
954
raise RevisionNotPresent(key, self)
955
return cached_version
956
text_map, contents_map = self._get_content_maps([key])
957
return contents_map[key]
959
def _get_content_maps(self, keys):
960
"""Produce maps of text and KnitContents
962
:return: (text_map, content_map) where text_map contains the texts for
963
the requested versions and content_map contains the KnitContents.
965
# FUTURE: This function could be improved for the 'extract many' case
966
# by tracking each component and only doing the copy when the number of
967
# children that need to apply deltas to it is > 1 or it is part of the
970
multiple_versions = len(keys) != 1
971
record_map = self._get_record_map(keys)
979
while cursor is not None:
980
record, record_details, digest, next = record_map[cursor]
981
components.append((cursor, record, record_details, digest))
982
if cursor in content_map:
987
for (component_id, record, record_details,
988
digest) in reversed(components):
989
if component_id in content_map:
990
content = content_map[component_id]
992
content, delta = self._factory.parse_record(key[-1],
993
record, record_details, content,
994
copy_base_content=multiple_versions)
995
if multiple_versions:
996
content_map[component_id] = content
998
final_content[key] = content
1000
# digest here is the digest from the last applied component.
1001
text = content.text()
1002
actual_sha = sha_strings(text)
1003
if actual_sha != digest:
1004
raise KnitCorrupt(self,
1006
'\n of reconstructed text does not match'
1008
'\n for version %s' %
1009
(actual_sha, digest, key))
1010
text_map[key] = text
1011
return text_map, final_content
1013
def get_parent_map(self, keys):
1014
"""Get a map of the parents of keys.
1016
:param keys: The keys to look up parents for.
1017
:return: A mapping from keys to parents. Absent keys are absent from
1020
return self._index.get_parent_map(keys)
1022
def _get_record_map(self, keys):
1023
"""Produce a dictionary of knit records.
1025
:return: {key:(record, record_details, digest, next)}
1027
data returned from read_records
1029
opaque information to pass to parse_record
1031
SHA1 digest of the full text after all steps are done
1033
build-parent of the version, i.e. the leftmost ancestor.
1034
Will be None if the record is not a delta.
1036
position_map = self._get_components_positions(keys)
1037
# key = component_id, r = record_details, i_m = index_memo, n = next
1038
records = [(key, i_m) for key, (r, i_m, n)
1039
in position_map.iteritems()]
1041
for key, record, digest in \
1042
self._read_records_iter(records):
1043
(record_details, index_memo, next) = position_map[key]
1044
record_map[key] = record, record_details, digest, next
1047
def get_record_stream(self, keys, ordering, include_delta_closure):
1048
"""Get a stream of records for keys.
1050
:param keys: The keys to include.
1051
:param ordering: Either 'unordered' or 'topological'. A topologically
1052
sorted stream has compression parents strictly before their
1054
:param include_delta_closure: If True then the closure across any
1055
compression parents will be included (in the opaque data).
1056
:return: An iterator of ContentFactory objects, each of which is only
1057
valid until the iterator is advanced.
1059
# keys might be a generator
1061
if not self._index.has_graph:
1062
# Cannot topological order when no graph has been stored.
1063
ordering = 'unordered'
1064
if include_delta_closure:
1065
positions = self._get_components_positions(keys, noraise=True)
1067
build_details = self._index.get_build_details(keys)
1068
positions = dict((key, self._build_details_to_components(details))
1069
for key, details in build_details.iteritems())
1070
absent_keys = keys.difference(set(positions))
1071
# There may be more absent keys : if we're missing the basis component
1072
# and are trying to include the delta closure.
1073
if include_delta_closure:
1074
# key:True means key can be reconstructed
1079
chain = [key, positions[key][2]]
1081
absent_keys.add(key)
1084
while chain[-1] is not None:
1085
if chain[-1] in checked_keys:
1086
result = checked_keys[chain[-1]]
1090
chain.append(positions[chain[-1]][2])
1092
# missing basis component
1095
for chain_key in chain[:-1]:
1096
checked_keys[chain_key] = result
1098
absent_keys.add(key)
1099
for key in absent_keys:
1100
yield AbsentContentFactory(key)
1101
# restrict our view to the keys we can answer.
1102
keys = keys - absent_keys
1103
# Double index lookups here : need a unified api ?
1104
parent_map = self.get_parent_map(keys)
1105
if ordering == 'topological':
1106
present_keys = topo_sort(parent_map)
1109
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1110
# XXX: At that point we need to consider double reads by utilising
1111
# components multiple times.
1112
if include_delta_closure:
1113
# XXX: get_content_maps performs its own index queries; allow state
1115
text_map, _ = self._get_content_maps(present_keys)
1116
for key in present_keys:
1117
yield FulltextContentFactory(key, parent_map[key], None,
1118
''.join(text_map[key]))
1120
records = [(key, positions[key][1]) for key in present_keys]
1121
for key, raw_data, sha1 in self._read_records_iter_raw(records):
1122
(record_details, index_memo, _) = positions[key]
1123
yield KnitContentFactory(key, parent_map[key],
1124
record_details, sha1, raw_data, self._factory.annotated, None)
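
# Illustrative sketch (assumption): consuming a record stream from knit_vf (a
# hypothetical KnitVersionedFiles) and recovering full texts, much as
# insert_record_stream on another store would.
#
#   for record in knit_vf.get_record_stream(keys, 'topological', True):
#       if record.storage_kind == 'absent':
#           raise errors.RevisionNotPresent(record.key, knit_vf)
#       text = record.get_bytes_as('fulltext')
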
1126
def get_sha1s(self, keys):
1127
"""See VersionedFiles.get_sha1s()."""
1128
record_map = self._get_record_map(keys)
1129
# record entry 2 is the 'digest'.
1130
return [record_map[key][2] for key in keys]
1132
def insert_record_stream(self, stream):
1133
"""Insert a record stream into this container.
1135
:param stream: A stream of records to insert.
1137
:seealso VersionedFiles.get_record_stream:
1139
def get_adapter(adapter_key):
1141
return adapters[adapter_key]
1143
adapter_factory = adapter_registry.get(adapter_key)
1144
adapter = adapter_factory(self)
1145
adapters[adapter_key] = adapter
1147
if self._factory.annotated:
1148
# self is annotated, we need annotated knits to use directly.
1149
annotated = "annotated-"
1152
# self is not annotated, but we can strip annotations cheaply.
1154
convertibles = set(["knit-annotated-ft-gz"])
1155
if self._max_delta_chain:
1156
convertibles.add("knit-annotated-delta-gz")
1157
# The set of types we can cheaply adapt without needing basis texts.
1158
native_types = set()
1159
if self._max_delta_chain:
1160
native_types.add("knit-%sdelta-gz" % annotated)
1161
native_types.add("knit-%sft-gz" % annotated)
1162
knit_types = native_types.union(convertibles)
1164
# Buffer all index entries that we can't add immediately because their
1165
# basis parent is missing. We don't buffer all because generating
1166
# annotations may require access to some of the new records. However we
1167
# can't generate annotations from new deltas until their basis parent
1168
# is present anyway, so we get away with not needing an index that
1169
# includes the new keys.
1170
# key = basis_parent, value = index entry to add
1171
buffered_index_entries = {}
1172
for record in stream:
1173
parents = record.parents
1174
# Raise an error when a record is missing.
1175
if record.storage_kind == 'absent':
1176
raise RevisionNotPresent([record.key], self)
1177
if record.storage_kind in knit_types:
1178
if record.storage_kind not in native_types:
1180
adapter_key = (record.storage_kind, "knit-delta-gz")
1181
adapter = get_adapter(adapter_key)
1183
adapter_key = (record.storage_kind, "knit-ft-gz")
1184
adapter = get_adapter(adapter_key)
1185
bytes = adapter.get_bytes(
1186
record, record.get_bytes_as(record.storage_kind))
1188
bytes = record.get_bytes_as(record.storage_kind)
1189
options = [record._build_details[0]]
1190
if record._build_details[1]:
1191
options.append('no-eol')
1192
# Just blat it across.
1193
# Note: This does end up adding data on duplicate keys. As
1194
# modern repositories use atomic insertions this should not
1195
# lead to excessive growth in the event of interrupted fetches.
1196
# 'knit' repositories may suffer excessive growth, but as a
1197
# deprecated format this is tolerable. It can be fixed if
1198
# needed by making the kndx index support raising on a duplicate
1199
# add with identical parents and options.
1200
access_memo = self._access.add_raw_records(
1201
[(record.key, len(bytes))], bytes)[0]
1202
index_entry = (record.key, options, access_memo, parents)
1204
if 'fulltext' not in options:
1205
basis_parent = parents[0]
1206
# Note that pack backed knits don't need to buffer here
1207
# because they buffer all writes to the transaction level,
1208
# but we don't expose that difference at the index level. If
1209
# the query here has sufficient cost to show up in
1210
# profiling we should do that.
1211
if basis_parent not in self.get_parent_map([basis_parent]):
1212
pending = buffered_index_entries.setdefault(
1214
pending.append(index_entry)
1217
self._index.add_records([index_entry])
1218
elif record.storage_kind == 'fulltext':
1219
self.add_lines(record.key, parents,
1220
split_lines(record.get_bytes_as('fulltext')))
1222
adapter_key = record.storage_kind, 'fulltext'
1223
adapter = get_adapter(adapter_key)
1224
lines = split_lines(adapter.get_bytes(
1225
record, record.get_bytes_as(record.storage_kind)))
1227
self.add_lines(record.key, parents, lines)
1228
except errors.RevisionAlreadyPresent:
1230
# Add any records whose basis parent is now available.
1231
added_keys = [record.key]
1233
key = added_keys.pop(0)
1234
if key in buffered_index_entries:
1235
index_entries = buffered_index_entries[key]
1236
self._index.add_records(index_entries)
1238
[index_entry[0] for index_entry in index_entries])
1239
del buffered_index_entries[key]
1240
# If there were any deltas which had a missing basis parent, error.
1241
if buffered_index_entries:
1242
raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
1245
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1246
"""Iterate over the lines in the versioned files from keys.
1248
This may return lines from other keys. Each item the returned
1249
iterator yields is a tuple of a line and a text version that that line
1250
is present in (not introduced in).
1252
Ordering of results is in whatever order is most suitable for the
1253
underlying storage format.
1255
If a progress bar is supplied, it may be used to indicate progress.
1256
The caller is responsible for cleaning up progress bars (because this
1260
* Lines are normalised by the underlying store: they will all have \n
1262
* Lines are returned in arbitrary order.
1264
:return: An iterator over (line, key).
1267
pb = progress.DummyProgress()
1269
# filter for available keys
1270
parent_map = self.get_parent_map(keys)
1271
if len(parent_map) != len(keys):
1272
missing = set(parent_map) - requested_keys
1273
raise RevisionNotPresent(key, self.filename)
1274
# we don't care about inclusions, the caller cares.
1275
# but we need to setup a list of records to visit.
1276
# we need key, position, length
1278
build_details = self._index.get_build_details(keys)
1280
key_records.append((key, build_details[key][0]))
1281
total = len(key_records)
1282
for key_idx, (key, data, sha_value) in \
1283
enumerate(self._read_records_iter(key_records)):
1284
pb.update('Walking content.', key_idx, total)
1285
compression_parent = build_details[key][1]
1286
if compression_parent is None:
1288
line_iterator = self._factory.get_fulltext_content(data)
1291
line_iterator = self._factory.get_linedelta_content(data)
1292
# XXX: It might be more efficient to yield (key,
1293
# line_iterator) in the future. However for now, this is a simpler
1294
# change to integrate into the rest of the codebase. RBC 20071110
1295
for line in line_iterator:
1297
pb.update('Walking content.', total, total)
1299
def _make_line_delta(self, delta_seq, new_content):
1300
"""Generate a line delta from delta_seq and new_content."""
1302
for op in delta_seq.get_opcodes():
1303
if op[0] == 'equal':
1305
diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
1308
def _merge_annotations(self, content, parents, parent_texts={},
1309
delta=None, annotated=None,
1310
left_matching_blocks=None):
1311
"""Merge annotations for content and generate deltas.
1313
This is done by comparing the annotations based on changes to the text
1314
and generating a delta on the resulting full texts. If annotations are
1315
not being created then a simple delta is created.
1317
if left_matching_blocks is not None:
1318
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1322
for parent_key in parents:
1323
merge_content = self._get_content(parent_key, parent_texts)
1324
if (parent_key == parents[0] and delta_seq is not None):
1327
seq = patiencediff.PatienceSequenceMatcher(
1328
None, merge_content.text(), content.text())
1329
for i, j, n in seq.get_matching_blocks():
1332
# this copies (origin, text) pairs across to the new
1333
# content for any line that matches the last-checked
1335
content._lines[j:j+n] = merge_content._lines[i:i+n]
1336
if content._lines and content._lines[-1][1][-1] != '\n':
1337
# The copied annotation was from a line without a trailing EOL,
1338
# reinstate one for the content object, to ensure correct
1340
line = content._lines[-1][1] + '\n'
1341
content._lines[-1] = (content._lines[-1][0], line)
1343
if delta_seq is None:
1344
reference_content = self._get_content(parents[0], parent_texts)
1345
new_texts = content.text()
1346
old_texts = reference_content.text()
1347
delta_seq = patiencediff.PatienceSequenceMatcher(
1348
None, old_texts, new_texts)
1349
return self._make_line_delta(delta_seq, content)
1351
def _parse_record(self, version_id, data):
1352
"""Parse an original format knit record.
1354
These have the last element of the key only present in the stored data.
1356
rec, record_contents = self._parse_record_unchecked(data)
1357
self._check_header_version(rec, version_id)
1358
return record_contents, rec[3]
1360
def _parse_record_header(self, key, raw_data):
1361
"""Parse a record header for consistency.
1363
:return: the header and the decompressor stream.
1364
as (stream, header_record)
1366
df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
1369
rec = self._check_header(key, df.readline())
1370
except Exception, e:
1371
raise KnitCorrupt(self,
1372
"While reading {%s} got %s(%s)"
1373
% (key, e.__class__.__name__, str(e)))
1376
def _parse_record_unchecked(self, data):
1378
# 4168 calls in 2880 217 internal
1379
# 4168 calls to _parse_record_header in 2121
1380
# 4168 calls to readlines in 330
1381
df = GzipFile(mode='rb', fileobj=StringIO(data))
1383
record_contents = df.readlines()
1384
except Exception, e:
1385
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
1386
(data, e.__class__.__name__, str(e)))
1387
header = record_contents.pop(0)
1388
rec = self._split_header(header)
1389
last_line = record_contents.pop()
1390
if len(record_contents) != int(rec[2]):
1391
raise KnitCorrupt(self,
1392
'incorrect number of lines %s != %s'
1393
' for version {%s} %s'
1394
% (len(record_contents), int(rec[2]),
1395
rec[1], record_contents))
1396
if last_line != 'end %s\n' % rec[1]:
1397
raise KnitCorrupt(self,
1398
'unexpected version end line %r, wanted %r'
1399
% (last_line, rec[1]))
1401
return rec, record_contents
1403
def _read_records_iter(self, records):
1404
"""Read text records from data file and yield result.
1406
The result will be returned in whatever is the fastest to read.
1407
Not by the order requested. Also, multiple requests for the same
1408
record will only yield 1 response.
1409
:param records: A list of (key, access_memo) entries
1410
:return: Yields (key, contents, digest) in the order
1411
read, not the order requested
1416
# XXX: This smells wrong, IO may not be getting ordered right.
1417
needed_records = sorted(set(records), key=operator.itemgetter(1))
1418
if not needed_records:
1421
# The transport optimizes the fetching as well
1422
# (ie, reads continuous ranges.)
1423
raw_data = self._access.get_raw_records(
1424
[index_memo for key, index_memo in needed_records])
1426
for (key, index_memo), data in \
1427
izip(iter(needed_records), raw_data):
1428
content, digest = self._parse_record(key[-1], data)
1429
yield key, content, digest
1431
def _read_records_iter_raw(self, records):
1432
"""Read text records from data file and yield raw data.
1434
This unpacks enough of the text record to validate the id is
1435
as expected, but that's all.
1437
Each item the iterator yields is (key, bytes, sha1_of_full_text).
1439
# setup an iterator of the external records:
1440
# uses readv so nice and fast we hope.
1442
# grab the disk data needed.
1443
needed_offsets = [index_memo for key, index_memo
1445
raw_records = self._access.get_raw_records(needed_offsets)
1447
for key, index_memo in records:
1448
data = raw_records.next()
1449
# validate the header (note that we can only use the suffix in
1450
# current knit records).
1451
df, rec = self._parse_record_header(key, data)
1453
yield key, data, rec[3]
1455
def _record_to_data(self, key, digest, lines, dense_lines=None):
1456
"""Convert key, digest, lines into a raw data block.
1458
:param key: The key of the record. Currently keys are always serialised
1459
using just the trailing component.
1460
:param dense_lines: The bytes of lines but in a denser form. For
1461
instance, if lines is a list of 1000 bytestrings each ending in \n,
1462
dense_lines may be a list with one line in it, containing all the
1463
1000's lines and their \n's. Using dense_lines if it is already
1464
known is a win because the string join to create bytes in this
1465
function spends less time resizing the final string.
1466
:return: (len, a StringIO instance with the raw data ready to read.)
1468
# Note: using a string copy here increases memory pressure with e.g.
1469
# ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
1470
# when doing the initial commit of a mozilla tree. RBC 20070921
1471
bytes = ''.join(chain(
1472
["version %s %d %s\n" % (key[-1],
1475
dense_lines or lines,
1476
["end %s\n" % key[-1]]))
1477
if type(bytes) != str:
1478
raise AssertionError(
1479
'data must be plain bytes was %s' % type(bytes))
1480
if lines and lines[-1][-1] != '\n':
1481
raise ValueError('corrupt lines value %r' % lines)
1482
compressed_bytes = bytes_to_gzip(bytes)
1483
return len(compressed_bytes), compressed_bytes
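
# Illustrative example (assumption): the uncompressed layout produced by
# _record_to_data before gzipping, for key ('rev-1',) with two lines.
#
#   version rev-1 2 <sha1-of-fulltext>
#   hello
#   world
#   end rev-1
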
1485
def _split_header(self, line):
1488
raise KnitCorrupt(self,
1489
'unexpected number of elements in record header')
1493
"""See VersionedFiles.keys."""
1494
if 'evil' in debug.debug_flags:
1495
trace.mutter_callsite(2, "keys scales with size of history")
1496
return self._index.keys()
1499
class _KndxIndex(object):
1500
"""Manages knit index files
1502
The index is kept in memory and read on startup, to enable
1503
fast lookups of revision information. The cursor of the index
1504
file is always pointing to the end, making it easy to append
1507
_cache is a cache for fast mapping from version id to a Index
1510
_history is a cache for fast mapping from indexes to version ids.
1512
The index data format is dictionary compressed when it comes to
parent references; an index entry may only have parents with a
lower index number. As a result, the index is topologically sorted.
1516
Duplicate entries may be written to the index for a single version id
1517
if this is done then the latter one completely replaces the former:
1518
this allows updates to correct version and parent information.
1519
Note that the two entries may share the delta, and that successive
1520
annotations and references MUST point to the first entry.
1522
The index file on disc contains a header, followed by one line per knit
1523
record. The same revision can be present in an index file more than once.
1524
The first occurrence gets assigned a sequence number starting from 0.
1526
The format of a single line is
1527
REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
1528
REVISION_ID is a utf8-encoded revision id
1529
FLAGS is a comma separated list of flags about the record. Values include
1530
no-eol, line-delta, fulltext.
1531
BYTE_OFFSET is the ascii representation of the byte offset in the data file
1532
that the compressed data starts at.
1533
LENGTH is the ascii representation of the length of the data file.
1534
PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
1536
PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
1537
revision id already in the knit that is a parent of REVISION_ID.
1538
The ' :' marker is the end of record marker.
1541
when a write is interrupted to the index file, it will result in a line
1542
that does not end in ' :'. If the ' :' is not present at the end of a line,
1543
or at the end of the file, then the record that is missing it will be
1544
ignored by the parser.
1546
When writing new records to the index file, the data is preceded by '\n'
1547
to ensure that records always start on new lines even if the last write was
1548
interrupted. As a result it's normal for the last line in the index to be
1549
missing a trailing newline. One can be added with no harmful effects.
1552
HEADER = "# bzr knit index 8\n"
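
# Illustrative example (assumption): a tiny .kndx index matching the format
# described above. rev-a is a fulltext at byte 0; rev-b is a line-delta whose
# parent is given as sequence number 0 (i.e. rev-a).
#
#   # bzr knit index 8
#   rev-a fulltext 0 123  :
#   rev-b line-delta 123 98 0 :
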
1554
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
1555
"""Create a _KndxIndex on transport using mapper."""
1556
self._transport = transport
1557
self._mapper = mapper
1558
self._get_scope = get_scope
1559
self._allow_writes = allow_writes
1560
self._is_locked = is_locked
1562
self.has_graph = True
1564
def add_records(self, records, random_id=False):
1565
"""Add multiple records to the index.
1567
:param records: a list of tuples:
1568
(key, options, access_memo, parents).
1569
:param random_id: If True the ids being added were randomly generated
1570
and no check for existence will be performed.
1573
for record in records:
1576
path = self._mapper.map(key) + '.kndx'
1577
path_keys = paths.setdefault(path, (prefix, []))
1578
path_keys[1].append(record)
1579
for path in sorted(paths):
1580
prefix, path_keys = paths[path]
1581
self._load_prefixes([prefix])
1583
orig_history = self._kndx_cache[prefix][1][:]
1584
orig_cache = self._kndx_cache[prefix][0].copy()
1587
for key, options, (_, pos, size), parents in path_keys:
1589
# kndx indices cannot be parentless.
1591
line = "\n%s %s %s %s %s :" % (
1592
key[-1], ','.join(options), pos, size,
1593
self._dictionary_compress(parents))
1594
if type(line) != str:
1595
raise AssertionError(
1596
'data must be utf8 was %s' % type(line))
1598
self._cache_key(key, options, pos, size, parents)
1599
if len(orig_history):
1600
self._transport.append_bytes(path, ''.join(lines))
1602
self._init_index(path, lines)
1604
# If any problems happen, restore the original values and re-raise
1605
self._kndx_cache[prefix] = (orig_cache, orig_history)
1608
def _cache_key(self, key, options, pos, size, parent_keys):
1609
"""Cache a version record in the history array and index cache.
1611
This is inlined into _load_data for performance. KEEP IN SYNC.
1612
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
1616
version_id = key[-1]
1617
# last-element only for compatibility with the C load_data.
1618
parents = tuple(parent[-1] for parent in parent_keys)
1619
for parent in parent_keys:
1620
if parent[:-1] != prefix:
1621
raise ValueError("mismatched prefixes for %r, %r" % (
1623
cache, history = self._kndx_cache[prefix]
1624
# only want the _history index to reference the 1st index entry
1626
if version_id not in cache:
1627
index = len(history)
1628
history.append(version_id)
1630
index = cache[version_id][5]
1631
cache[version_id] = (version_id,
1638
def check_header(self, fp):
1639
line = fp.readline()
1641
# An empty file can actually be treated as though the file doesn't
1643
raise errors.NoSuchFile(self)
1644
if line != self.HEADER:
1645
raise KnitHeaderError(badline=line, filename=self)
1647
def _check_read(self):
1648
if not self._is_locked():
1649
raise errors.ObjectNotLocked(self)
1650
if self._get_scope() != self._scope:
1653
def _check_write_ok(self):
"""Assert that writes are permitted."""
1655
if not self._is_locked():
1656
raise errors.ObjectNotLocked(self)
1657
if self._get_scope() != self._scope:
1659
if self._mode != 'w':
1660
raise errors.ReadOnlyObjectDirtiedError(self)
1662
def get_build_details(self, keys):
1663
"""Get the method, index_memo and compression parent for keys.
1665
Ghosts are omitted from the result.
1667
:param keys: An iterable of keys.
1668
:return: A dict of key:(access_memo, compression_parent, parents,
1671
opaque structure to pass to read_records to extract the raw
1674
Content that this record is built upon, may be None
1676
Logical parents of this node
1678
extra information about the content which needs to be passed to
1679
Factory.parse_record
1681
prefixes = self._partition_keys(keys)
1682
parent_map = self.get_parent_map(keys)
1685
if key not in parent_map:
1687
method = self.get_method(key)
1688
parents = parent_map[key]
1689
if method == 'fulltext':
1690
compression_parent = None
1692
compression_parent = parents[0]
1693
noeol = 'no-eol' in self.get_options(key)
1694
index_memo = self.get_position(key)
1695
result[key] = (index_memo, compression_parent,
1696
parents, (method, noeol))
1699
def get_method(self, key):
1700
"""Return compression method of specified key."""
1701
options = self.get_options(key)
1702
if 'fulltext' in options:
1704
elif 'line-delta' in options:
1707
raise errors.KnitIndexUnknownMethod(self, options)
1709
def get_options(self, key):
1710
"""Return a list representing options.
1714
prefix, suffix = self._split_key(key)
1715
self._load_prefixes([prefix])
1716
return self._kndx_cache[prefix][0][suffix][1]
1718
def get_parent_map(self, keys):
1719
"""Get a map of the parents of keys.
1721
:param keys: The keys to look up parents for.
1722
:return: A mapping from keys to parents. Absent keys are absent from
1725
# Parse what we need to up front, this potentially trades off I/O
1726
# locality (.kndx and .knit in the same block group for the same file
1727
# id) for less checking in inner loops.
1729
prefixes.update(key[:-1] for key in keys)
1730
self._load_prefixes(prefixes)
1735
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
1739
result[key] = tuple(prefix + (suffix,) for
1740
suffix in suffix_parents)
1743
def get_position(self, key):
1744
"""Return details needed to access the version.
1746
:return: a tuple (key, data position, size) to hand to the access
1747
logic to get the record.
1749
prefix, suffix = self._split_key(key)
1750
self._load_prefixes([prefix])
1751
entry = self._kndx_cache[prefix][0][suffix]
1752
return key, entry[2], entry[3]
1754
def _init_index(self, path, extra_lines=[]):
1755
"""Initialize an index."""
1757
sio.write(self.HEADER)
1758
sio.writelines(extra_lines)
1760
self._transport.put_file_non_atomic(path, sio,
1761
create_parent_dir=True)
1762
# self._create_parent_dir)
1763
# mode=self._file_mode,
1764
# dir_mode=self._dir_mode)
1767
"""Get all the keys in the collection.
1769
The keys are not ordered.
1772
# Identify all key prefixes.
1773
# XXX: A bit hacky, needs polish.
1774
if type(self._mapper) == ConstantMapper:
1778
for quoted_relpath in self._transport.iter_files_recursive():
1779
path, ext = os.path.splitext(quoted_relpath)
1781
prefixes = [self._mapper.unmap(path) for path in relpaths]
1782
self._load_prefixes(prefixes)
1783
for prefix in prefixes:
1784
for suffix in self._kndx_cache[prefix][1]:
1785
result.add(prefix + (suffix,))
1788
def _load_prefixes(self, prefixes):
1789
"""Load the indices for prefixes."""
1791
for prefix in prefixes:
1792
if prefix not in self._kndx_cache:
1793
# the load_data interface writes to these variables.
1796
self._filename = prefix
1798
path = self._mapper.map(prefix) + '.kndx'
1799
fp = self._transport.get(path)
1801
# _load_data may raise NoSuchFile if the target knit is
1803
_load_data(self, fp)
1806
self._kndx_cache[prefix] = (self._cache, self._history)
1811
self._kndx_cache[prefix] = ({}, [])
1812
if type(self._mapper) == ConstantMapper:
1813
# preserve behaviour for revisions.kndx etc.
1814
self._init_index(path)
1819
def _partition_keys(self, keys):
1820
"""Turn keys into a dict of prefix:suffix_list."""
1823
prefix_keys = result.setdefault(key[:-1], [])
1824
prefix_keys.append(key[-1])
1827
    def _dictionary_compress(self, keys):
        """Dictionary compress keys.

        :param keys: The keys to generate references to.
        :return: A string representation of keys. keys which are present are
            dictionary compressed, and others are emitted as fulltext with a
            '.' prefix.
        """
        if not keys:
            return ''
        result_list = []
        prefix = keys[0][:-1]
        cache = self._kndx_cache[prefix][0]
        for key in keys:
            if key[:-1] != prefix:
                # kndx indices cannot refer across partitioned storage.
                raise ValueError("mismatched prefixes for %r" % keys)
            if key[-1] in cache:
                # -- inlined lookup() --
                result_list.append(str(cache[key[-1]][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + key[-1])
        return ' '.join(result_list)

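    # For example (hypothetical revisions): if the kndx cache already lists
    # 'rev-a' and 'rev-b' at dictionary positions 0 and 3, then compressing the
    # keys for 'rev-a', 'rev-b' and an absent 'rev-c' would produce the string
    # "0 3 .rev-c", the missing entry being written out in full with a '.'
    # prefix.
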
    def _reset_cache(self):
        # Possibly this should be a LRU cache. A dictionary from key_prefix to
        # (cache_dict, history_vector) for parsed kndx files.
        self._kndx_cache = {}
        self._scope = self._get_scope()
        allow_writes = self._allow_writes()
        if allow_writes:
            self._mode = 'w'
        else:
            self._mode = 'r'

    def _split_key(self, key):
        """Split key into a prefix and suffix."""
        return key[:-1], key[-1]


class _KnitGraphIndex(object):
    """A KnitVersionedFiles index layered on GraphIndex."""

    def __init__(self, graph_index, is_locked, deltas=False, parents=True,
        add_callback=None):
        """Construct a KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param is_locked: A callback to check whether the object should answer
            queries; returns True if the index is locked and thus usable.
        :param deltas: Allow delta-compressed records.
        :param parents: If True, record knits parents, if not do not record
            parents.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        """
        self._add_callback = add_callback
        self._graph_index = graph_index
        self._deltas = deltas
        self._parents = parents
        if deltas and not parents:
            # XXX: TODO: Delta tree and parent graph should be conceptually
            # separate.
            raise KnitCorrupt(self, "Cannot do delta compression without "
                "parent tracking.")
        self.has_graph = parents
        self._is_locked = is_locked

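    # A minimal construction sketch (the names are hypothetical; callers in the
    # pack repository code supply a real GraphIndex, a lock-check callable and
    # an add-nodes callback for the index being written):
    #
    #   knit_index = _KnitGraphIndex(combined_graph_index, repo.is_locked,
    #       deltas=True, parents=True, add_callback=builder.add_nodes)
    #
    # With parents=False no reference lists are stored; with deltas=True a
    # second reference list names each record's compression parent.
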
    def add_records(self, records, random_id=False):
        """Add multiple records to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param records: a list of tuples:
                        (key, options, access_memo, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anyway.

        keys = {}
        for (key, options, access_memo, parents) in records:
            if self._parents:
                parents = tuple(parents)
            index, pos, size = access_memo
            if 'no-eol' in options:
                value = 'N'
            else:
                value = ' '
            value += "%d %d" % (pos, size)
            if not self._deltas:
                if 'line-delta' in options:
                    raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
            if self._parents:
                if self._deltas:
                    if 'line-delta' in options:
                        node_refs = (parents, (parents[0],))
                    else:
                        node_refs = (parents, ())
                else:
                    node_refs = (parents, )
            else:
                if parents:
                    raise KnitCorrupt(self, "attempt to add node with parents "
                        "in parentless index.")
                node_refs = ()
            keys[key] = (value, node_refs)
        # check for dups
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if (value[0] != keys[key][0][0] or
                    node_refs != keys[key][1]):
                    raise KnitCorrupt(self, "inconsistent details in add_records"
                        ": %s %s" % ((value, node_refs), keys[key]))
                del keys[key]
        result = []
        if self._parents:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value, node_refs))
        else:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value))
        self._add_callback(result)

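    # Node layout sketch (hypothetical offsets): a 700 byte record stored at
    # byte 1234 gets value ' 1234 700', or 'N1234 700' when the text has no
    # trailing EOL.  node_refs pairs up (parents, compression_parents):
    #   line-delta record: (parents, (parents[0],))
    #   fulltext record:   (parents, ()) in a delta index, (parents,) otherwise
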
    def _check_read(self):
        """Raise if reads are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _check_write_ok(self):
        """Raise if writes are not permitted."""
        if not self._is_locked():
            raise errors.ObjectNotLocked(self)

    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        if len(compression_parents) != 1:
            raise AssertionError(
                "Too many compression parents: %r" % compression_parents)
        return compression_parents[0]

    def get_build_details(self, keys):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param keys: An iterable of keys.
        :return: A dict of key:
            (index_memo, compression_parent, parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        self._check_read()
        result = {}
        entries = self._get_entries(keys, False)
        for entry in entries:
            key = entry[1]
            if not self._parents:
                parents = ()
            else:
                parents = entry[3][0]
            if not self._deltas:
                compression_parent_key = None
            else:
                compression_parent_key = self._compression_parent(entry)
            noeol = (entry[2][0] == 'N')
            if compression_parent_key:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[key] = (self._node_to_position(entry),
                                  compression_parent_key, parents,
                                  (method, noeol))
        return result

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)

    def get_method(self, key):
        """Return compression method of specified key."""
        return self._get_method(self._get_node(key))

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'

    def _get_node(self, key):
        try:
            return list(self._get_entries([key]))[0]
        except IndexError:
            raise RevisionNotPresent(key, self)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(key)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(key)
        return self._node_to_position(node)

"""Get all the keys in the collection.
2105
The keys are not ordered.
2108
return [node[1] for node in self._graph_index.iter_all_entries()]
2110
    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])

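    # For example (hypothetical node): an entry whose value is ' 1234 700' in
    # index 'idx' maps to the memo (idx, 1234, 700); the leading 'N' or ' '
    # byte only records the no-eol flag and is skipped by the [1:] slice above.
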

class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is appended to the .knit file named by each key, one raw data
        item per key.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        # TODO: This can be tuned for writing to sftp and other servers where
        # append() is relatively expensive by grouping the writes to each key
        # prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            try:
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            except errors.NoSuchFile:
                self._transport.mkdir(osutils.dirname(path))
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            offset += size
            result.append((key, base, size))
        return result

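    # Usage sketch (hypothetical sizes): appending two records of 100 and 250
    # bytes for ('fid', 'rev-1') and ('fid', 'rev-2') to a previously empty
    # .knit file would return memos like
    #   [(('fid', 'rev-1'), 0, 100), (('fid', 'rev-2'), 100, 250)]
    # where the middle value is the offset at which each record was appended,
    # as reported by transport.append_bytes.
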
    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the access memo for
            retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index request to minimise readv's issued.
        request_lists = []
        current_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if current_prefix == key[:-1]:
                current_list.append((offset, length))
            else:
                if current_prefix is not None:
                    request_lists.append((current_prefix, current_list))
                current_prefix = key[:-1]
                current_list = [(offset, length)]
        # handle the last entry
        if current_prefix is not None:
            request_lists.append((current_prefix, current_list))
        for prefix, read_vector in request_lists:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data

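    # For example (hypothetical memos): consecutive memos whose keys share the
    # prefix ('fid',) are coalesced into a single readv such as
    # [(0, 100), (100, 250)] against the one .knit file for that prefix, and
    # the raw record bytes are yielded in that order.
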

class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) != str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            transport, path = self._indices[index]
            reader = pack.make_readv_reader(transport, path, offsets)
            for names, read_func in reader.iter_records():
                yield read_func(None)

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

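# A minimal usage sketch for _DirectPackAccess (hypothetical objects: a
# pack.ContainerWriter spooling a new pack, and index objects for existing
# packs):
#
#   access = _DirectPackAccess({old_index: (transport, 'old.pack')})
#   access.set_writer(container_writer, new_index, (transport, 'new.pack'))
#   memos = access.add_raw_records([(key, len(raw_bytes))], raw_bytes)
#   raw = list(access.get_raw_records(memos))
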
# Deprecated, use PatienceSequenceMatcher instead
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher


def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.

    This implementation is for knits with no cached annotations.
    It will work for knits with cached annotations, but this is not
    recommended.
    """
    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate(revision_id))

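# Usage sketch (hypothetical objects): 'texts' would be a knit-backed
# versioned files collection and the key one of its text keys; each annotated
# line is an (origin, line) pair as produced by annotate.reannotate:
#
#   for origin, line in annotate_knit(texts, ('file-id', 'rev-2')):
#       print origin, line
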

class _KnitAnnotator(object):
    """Build up the annotations for a text."""

    def __init__(self, knit):
        self._knit = knit

        # Content objects, differ from fulltexts because of how final newlines
        # are treated by knits. The content objects here will always have a
        # final newline.
        self._fulltext_contents = {}

        # Annotated lines of specific revisions
        self._annotated_lines = {}

        # Track the raw data for nodes that we could not process yet.
        # This maps the revision_id of the base to a list of children that will
        # be annotated from it.
        self._pending_children = {}

        # Nodes which cannot be extracted
        self._ghosts = set()

        # Track how many children this node has, so we know if we need to keep
        # it
        self._annotate_children = {}
        self._compression_children = {}

        self._all_build_details = {}
        # The children => parent revision_id graph
        self._revision_id_graph = {}

        self._heads_provider = None

        self._nodes_to_keep_annotations = set()
        self._generations_until_keep = 100

    def set_generations_until_keep(self, value):
        """Set the number of generations before caching a node.

        Setting this to -1 will cache every merge node, setting this higher
        will cache fewer nodes.
        """
        self._generations_until_keep = value

    def _add_fulltext_content(self, revision_id, content_obj):
        self._fulltext_contents[revision_id] = content_obj
        # TODO: jam 20080305 It might be good to check the sha1digest here
        return content_obj.text()

    def _check_parents(self, child, nodes_to_annotate):
        """Check if all parents have been processed.

        :param child: A tuple of (rev_id, parents, raw_content)
        :param nodes_to_annotate: If child is ready, add it to
            nodes_to_annotate, otherwise put it back in self._pending_children
        """
        for parent_id in child[1]:
            if (parent_id not in self._annotated_lines):
                # This parent is not yet annotated, so defer the child until
                # the parent has been processed.
                self._pending_children.setdefault(parent_id,
                                                  []).append(child)
                break
        else:
            # This one is ready to be processed
            nodes_to_annotate.append(child)

    def _add_annotation(self, revision_id, fulltext, parent_ids,
                        left_matching_blocks=None):
        """Add an annotation entry.

        All parents should already have been annotated.
        :return: A list of children that now have their parents satisfied.
        """
        a = self._annotated_lines
        annotated_parent_lines = [a[p] for p in parent_ids]
        annotated_lines = list(annotate.reannotate(annotated_parent_lines,
            fulltext, revision_id, left_matching_blocks,
            heads_provider=self._get_heads_provider()))
        self._annotated_lines[revision_id] = annotated_lines
        for p in parent_ids:
            ann_children = self._annotate_children[p]
            ann_children.remove(revision_id)
            if (not ann_children
                and p not in self._nodes_to_keep_annotations):
                del self._annotated_lines[p]
                del self._all_build_details[p]
                if p in self._fulltext_contents:
                    del self._fulltext_contents[p]
        # Now that we've added this one, see if there are any pending
        # deltas to be done, certainly this parent is finished
        nodes_to_annotate = []
        for child in self._pending_children.pop(revision_id, []):
            self._check_parents(child, nodes_to_annotate)
        return nodes_to_annotate

    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate the
        fulltext.)

        :return: A list of (key, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        if key in self._annotated_lines:
            # Nothing to do
            return []
        pending = set([key])
        records = []
        generation = 0
        kept_generation = 0
        while pending:
            # get all pending nodes
            generation += 1
            this_iteration = pending
            build_details = self._knit._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._knit._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                self._revision_id_graph[key] = parents
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update(p for p in parents
                                 if p not in self._all_build_details)
                if compression_parent:
                    self._compression_children.setdefault(compression_parent,
                        []).append(key)
                if parents:
                    for parent in parents:
                        self._annotate_children.setdefault(parent,
                            []).append(key)
                    num_gens = generation - kept_generation
                    if ((num_gens >= self._generations_until_keep)
                        and len(parents) > 1):
                        kept_generation = generation
                        self._nodes_to_keep_annotations.add(key)

            missing_versions = this_iteration.difference(build_details.keys())
            self._ghosts.update(missing_versions)
            for missing_version in missing_versions:
                # add a key, no parents
                self._revision_id_graph[missing_version] = ()
                pending.discard(missing_version) # don't look for it
        if self._ghosts.intersection(self._compression_children):
            raise KnitCorrupt(
                "We cannot have nodes which have a ghost compression parent:\n"
                "ghosts: %r\n"
                "compression children: %r"
                % (self._ghosts, self._compression_children))
        # Cleanout anything that depends on a ghost so that we don't wait for
        # the ghost to show up
        for node in self._ghosts:
            if node in self._annotate_children:
                # We won't be building this node
                del self._annotate_children[node]
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records

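    # Sketch of the returned records (hypothetical keys): annotating rev-3 with
    # ancestry rev-1 <- rev-2 <- rev-3 yields roughly
    #   [(rev-1-key, memo), (rev-2-key, memo), (rev-3-key, memo)]
    # i.e. reversed discovery order, so compression bases are read before the
    # texts built on them.
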
    def _annotate_records(self, records):
        """Build the annotations for the listed records."""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        for (rev_id, record,
             digest) in self._knit._read_records_iter(records):
            if rev_id in self._annotated_lines:
                continue
            parent_ids = self._revision_id_graph[rev_id]
            parent_ids = [p for p in parent_ids if p not in self._ghosts]
            details = self._all_build_details[rev_id]
            (index_memo, compression_parent, parents,
             record_details) = details
            nodes_to_annotate = []
            # TODO: Remove the punning between compression parents, and
            #       parent_ids, we should be able to do this without assuming
            #       all texts are present
            if len(parent_ids) == 0:
                # There are no parents for this node, so just add it
                # TODO: This probably needs to be decoupled
                fulltext_content, delta = self._knit._factory.parse_record(
                    rev_id, record, record_details, None)
                fulltext = self._add_fulltext_content(rev_id, fulltext_content)
                nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
                    parent_ids, left_matching_blocks=None))
            else:
                child = (rev_id, parent_ids, record)
                # Check if all the parents are present
                self._check_parents(child, nodes_to_annotate)
            while nodes_to_annotate:
                # Should we use a queue here instead of a stack?
                (rev_id, parent_ids, record) = nodes_to_annotate.pop()
                (index_memo, compression_parent, parents,
                 record_details) = self._all_build_details[rev_id]
                if compression_parent is not None:
                    comp_children = self._compression_children[compression_parent]
                    if rev_id not in comp_children:
                        raise AssertionError("%r not in compression children %r"
                            % (rev_id, comp_children))
                    # If there is only 1 child, it is safe to reuse this
                    # content
                    reuse_content = (len(comp_children) == 1
                        and compression_parent not in
                            self._nodes_to_keep_annotations)
                    if reuse_content:
                        # Remove it from the cache since it will be changing
                        parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
                        # Make sure to copy the fulltext since it might be
                        # modified
                        parent_fulltext = list(parent_fulltext_content.text())
                    else:
                        parent_fulltext_content = self._fulltext_contents[compression_parent]
                        parent_fulltext = parent_fulltext_content.text()
                    comp_children.remove(rev_id)
                    fulltext_content, delta = self._knit._factory.parse_record(
                        rev_id, record, record_details,
                        parent_fulltext_content,
                        copy_base_content=(not reuse_content))
                    fulltext = self._add_fulltext_content(rev_id,
                                                          fulltext_content)
                    blocks = KnitContent.get_line_delta_blocks(delta,
                            parent_fulltext, fulltext)
                else:
                    fulltext_content = self._knit._factory.parse_fulltext(
                        record, rev_id)
                    fulltext = self._add_fulltext_content(rev_id,
                        fulltext_content)
                    blocks = None
                nodes_to_annotate.extend(
                    self._add_annotation(rev_id, fulltext, parent_ids,
                                     left_matching_blocks=blocks))

    def _get_heads_provider(self):
        """Create a heads provider for resolving ancestry issues."""
        if self._heads_provider is not None:
            return self._heads_provider
        parent_provider = _mod_graph.DictParentsProvider(
            self._revision_id_graph)
        graph_obj = _mod_graph.Graph(parent_provider)
        head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
        self._heads_provider = head_cache
        return head_cache

    def annotate(self, key):
        """Return the annotated fulltext at the given key.

        :param key: The key to annotate.
        """
        records = self._get_build_graph(key)
        if key in self._ghosts:
            raise errors.RevisionNotPresent(key, self._knit)
        self._annotate_records(records)
        return self._annotated_lines[key]


try:
    from bzrlib._knit_load_data_c import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data