# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Knit versionedfile implementation.

A knit is a versioned file implementation that supports efficient append only
writes.

lifeless: the data file is made up of "delta records".  each delta record has a delta header
that contains; (1) a version id, (2) the size of the delta (in lines), and (3) the digest of
the -expanded data- (ie, the delta applied to the parent).  the delta also ends with an
end-marker; simply "end VERSION"

delta can be line or full contents.
... the 8's there are the index number of the annotation.
version robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad 7 c7d23b2a5bd6ca00e8e266cec0ec228158ee9f9e
8 e.set('executable', 'yes')
8 if elt.get('executable') == 'yes':
8 ie.executable = True
end robertc@robertcollins.net-20051003014215-ee2990904cc4c7ad

09:33 < jrydberg> lifeless: each index is made up of a tuple of; version id, options, position, size, parents
09:33 < jrydberg> lifeless: the parents are currently dictionary compressed
09:33 < jrydberg> lifeless: (meaning it currently does not support ghosts)
09:33 < lifeless> right
09:33 < jrydberg> lifeless: the position and size is the range in the data file

so the index sequence is the dictionary compressed sequence number used
in the deltas to provide line annotation
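
For example, each content line in the data file couples an annotation index
with its text: the line '8 ie.executable = True' above splits into the
annotation index '8' and the content 'ie.executable = True'.  (Illustrative
reading of the sample record.)
"""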

# 10:16 < lifeless> make partial index writes safe
# 10:16 < lifeless> implement 'knit.check()' like weave.check()
# 10:17 < lifeless> record known ghosts so we can detect when they are filled in rather than the current 'reweave'
# move sha1 out of the content so that join is faster at verifying parents
# record content length ?

from cStringIO import StringIO
from itertools import izip, chain
from zlib import Z_DEFAULT_COMPRESSION

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
""")
from bzrlib import (
    debug,
    diff,
    errors,
    merge,
    patiencediff,
    progress,
    trace,
    )
from bzrlib.errors import (
    InvalidRevisionId,
    KnitCorrupt,
    KnitHeaderError,
    NoSuchFile,
    RevisionAlreadyPresent,
    RevisionNotPresent,
    )
from bzrlib.graph import Graph
from bzrlib.osutils import (
    contains_whitespace,
    sha_string,
    sha_strings,
    split_lines,
    )
from bzrlib.tsort import topo_sort
from bzrlib.tuned_gzip import GzipFile, bytes_to_gzip
from bzrlib.versionedfile import (
    AbsentContentFactory,
    ContentFactory,
    VersionedFile,
    adapter_registry,
    )

# TODO: Split out code specific to this format into an associated object.

# TODO: Can we put in some kind of value to check that the index and data
# files belong together?

# TODO: accommodate binaries, perhaps by storing a byte count

# TODO: function to check whole file

# TODO: atomically append data, then measure backwards from the cursor
# position after writing to work out where it was located.  we may need to
# bypass python file buffering.

DATA_SUFFIX = '.knit'
INDEX_SUFFIX = '.kndx'


class KnitAdapter(object):
    """Base class for knit record adaption."""

    def __init__(self, basis_vf):
        """Create an adapter which accesses full texts from basis_vf.

        :param basis_vf: A versioned file to access basis texts of deltas from.
            May be None for adapters that do not need to access basis texts.
        """
        self._data = _KnitData(None)
        self._annotate_factory = KnitAnnotateFactory()
        self._plain_factory = KnitPlainFactory()
        self._basis_vf = basis_vf


class FTAnnotatedToUnannotated(KnitAdapter):
    """An adapter from FT annotated knits to unannotated ones."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content = self._annotate_factory.parse_fulltext(contents, rec[1])
        size, bytes = self._data._record_to_data(rec[1], rec[3], content.text())
        return bytes


class DeltaAnnotatedToUnannotated(KnitAdapter):
    """An adapter for deltas from annotated to unannotated."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        contents = self._plain_factory.lower_line_delta(delta)
        size, bytes = self._data._record_to_data(rec[1], rec[3], contents)
        return bytes


class FTAnnotatedToFullText(KnitAdapter):
    """An adapter from FT annotated knits to full texts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        content, delta = self._annotate_factory.parse_record(factory.key[0],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaAnnotatedToFullText(KnitAdapter):
    """An adapter for deltas from annotated knits to full texts."""

    def get_bytes(self, factory, annotated_compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(annotated_compressed_bytes)
        delta = self._annotate_factory.parse_line_delta(contents, rec[1],
            plain=True)
        compression_parent = factory.parents[0][0]
        basis_lines = self._basis_vf.get_lines(compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        basis_content.apply_delta(delta, rec[1])
        basis_content._should_strip_eol = factory._build_details[1]
        return ''.join(basis_content.text())


class FTPlainToFullText(KnitAdapter):
    """An adapter from FT plain knits to full texts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        content, delta = self._plain_factory.parse_record(factory.key[0],
            contents, factory._build_details, None)
        return ''.join(content.text())


class DeltaPlainToFullText(KnitAdapter):
    """An adapter for deltas from plain knits to full texts."""

    def get_bytes(self, factory, compressed_bytes):
        rec, contents = \
            self._data._parse_record_unchecked(compressed_bytes)
        delta = self._plain_factory.parse_line_delta(contents, rec[1])
        compression_parent = factory.parents[0][0]
        basis_lines = self._basis_vf.get_lines(compression_parent)
        basis_content = PlainKnitContent(basis_lines, compression_parent)
        # Manually apply the delta because we have one annotated content and
        # one plain.
        content, _ = self._plain_factory.parse_record(rec[1], contents,
            factory._build_details, basis_content)
        return ''.join(content.text())


class KnitContentFactory(ContentFactory):
    """Content factory for streaming from knits.

    :seealso ContentFactory:
    """

    def __init__(self, version, parents, build_details, sha1, raw_record,
        annotated, knit=None):
        """Create a KnitContentFactory for version.

        :param version: The version.
        :param parents: The parents.
        :param build_details: The build details as returned from
            get_build_details.
        :param sha1: The sha1 expected from the full text of this object.
        :param raw_record: The bytes of the knit data from disk.
        :param annotated: True if the raw data is annotated.
        """
        ContentFactory.__init__(self)
        self.sha1 = sha1
        self.key = (version,)
        self.parents = tuple((parent,) for parent in parents)
        if build_details[0] == 'line-delta':
            kind = 'delta'
        else:
            kind = 'ft'
        if annotated:
            annotated_kind = 'annotated-'
        else:
            annotated_kind = ''
        self.storage_kind = 'knit-%s%s-gz' % (annotated_kind, kind)
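        # The resulting kinds are e.g. 'knit-annotated-ft-gz' for an annotated
        # fulltext record, or 'knit-delta-gz' for an unannotated line delta.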
        self._raw_record = raw_record
        self._build_details = build_details
        self._knit = knit

    def get_bytes_as(self, storage_kind):
        if storage_kind == self.storage_kind:
            return self._raw_record
        if storage_kind == 'fulltext' and self._knit is not None:
            return self._knit.get_text(self.key[0])
        else:
            raise errors.UnavailableRepresentation(self.key, storage_kind,
                self.storage_kind)


class KnitContent(object):
    """Content of a knit version to which deltas can be applied."""

    def __init__(self):
        self._should_strip_eol = False

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        raise NotImplementedError(self.apply_delta)

    def cleanup_eol(self, copy_on_mutate=True):
        if self._should_strip_eol:
            if copy_on_mutate:
                self._lines = self._lines[:]
            self.strip_last_line_newline()

    def line_delta_iter(self, new_lines):
        """Generate line-based delta from this content to new_lines."""
        new_texts = new_lines.text()
        old_texts = self.text()
        s = patiencediff.PatienceSequenceMatcher(None, old_texts, new_texts)
        for tag, i1, i2, j1, j2 in s.get_opcodes():
            if tag == 'equal':
                continue
            # ofrom, oto, length, data
            yield i1, i2, j2 - j1, new_lines._lines[j1:j2]

    def line_delta(self, new_lines):
        return list(self.line_delta_iter(new_lines))

    @staticmethod
    def get_line_delta_blocks(knit_delta, source, target):
        """Extract SequenceMatcher.get_matching_blocks() from a knit delta"""
        target_len = len(target)
        s_pos = 0
        t_pos = 0
        for s_begin, s_end, t_len, new_text in knit_delta:
            true_n = s_begin - s_pos
            n = true_n
            if n > 0:
                # knit deltas do not provide reliable info about whether the
                # last line of a file matches, due to eol handling.
                if source[s_pos + n - 1] != target[t_pos + n - 1]:
                    n -= 1
                if n > 0:
                    yield s_pos, t_pos, n
            t_pos += t_len + true_n
            s_pos = s_end
        n = target_len - t_pos
        if n > 0:
            if source[s_pos + n - 1] != target[t_pos + n - 1]:
                n -= 1
            if n > 0:
                yield s_pos, t_pos, n
        yield s_pos + (target_len - t_pos), target_len, 0
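

# Illustrative sketch, not part of the original module: how the in-memory
# delta hunks used by apply_delta below are applied.  Each hunk
# (start, end, count, lines) replaces old lines [start:end) with `count` new
# lines; `offset` tracks how earlier hunks shift later positions.  The helper
# name is hypothetical.
def _demo_apply_delta(old_lines, delta):
    lines = list(old_lines)
    offset = 0
    for start, end, count, delta_lines in delta:
        lines[offset+start:offset+end] = delta_lines
        offset = offset + (start - end) + count
    return lines
# e.g. _demo_apply_delta(['a\n', 'b\n', 'c\n'], [(1, 2, 2, ['x\n', 'y\n'])])
# returns ['a\n', 'x\n', 'y\n', 'c\n'].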


class AnnotatedKnitContent(KnitContent):
    """Annotated content."""

    def __init__(self, lines):
        KnitContent.__init__(self)
        self._lines = lines

    def annotate(self):
        """Return a list of (origin, text) for each content line."""
        return list(self._lines)

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        offset = 0
        lines = self._lines
        for start, end, count, delta_lines in delta:
            lines[offset+start:offset+end] = delta_lines
            offset = offset + (start - end) + count

    def strip_last_line_newline(self):
        line = self._lines[-1][1].rstrip('\n')
        self._lines[-1] = (self._lines[-1][0], line)
        self._should_strip_eol = False

    def text(self):
        try:
            lines = [text for origin, text in self._lines]
        except ValueError, e:
            # most commonly (only?) caused by the internal form of the knit
            # missing annotation information because of a bug - see thread
            raise KnitCorrupt(self,
                "line in annotated knit missing annotation information: %s"
                % (e,))
        if self._should_strip_eol:
            lines[-1] = lines[-1].rstrip('\n')
        return lines

    def copy(self):
        return AnnotatedKnitContent(self._lines[:])


class PlainKnitContent(KnitContent):
    """Unannotated content.

    When annotate[_iter] is called on this content, the same version is reported
    for all lines. Generally, annotate[_iter] is not useful on PlainKnitContent
    objects.
    """

    def __init__(self, lines, version_id):
        KnitContent.__init__(self)
        self._lines = lines
        self._version_id = version_id

    def annotate(self):
        """Return a list of (origin, text) for each content line."""
        return [(self._version_id, line) for line in self._lines]

    def apply_delta(self, delta, new_version_id):
        """Apply delta to this object to become new_version_id."""
        offset = 0
        lines = self._lines
        for start, end, count, delta_lines in delta:
            lines[offset+start:offset+end] = delta_lines
            offset = offset + (start - end) + count
        self._version_id = new_version_id

    def copy(self):
        return PlainKnitContent(self._lines[:], self._version_id)

    def strip_last_line_newline(self):
        self._lines[-1] = self._lines[-1].rstrip('\n')
        self._should_strip_eol = False

    def text(self):
        lines = self._lines
        if self._should_strip_eol:
            lines = lines[:]
            lines[-1] = lines[-1].rstrip('\n')
        return lines


class _KnitFactory(object):
    """Base class for common Factory functions."""

    def parse_record(self, version_id, record, record_details,
                     base_content, copy_base_content=True):
        """Parse a record into a full content object.

        :param version_id: The official version id for this content
        :param record: The data returned by read_records_iter()
        :param record_details: Details about the record returned by
            get_build_details
        :param base_content: If get_build_details returns a compression_parent,
            you must return a base_content here, else use None
        :param copy_base_content: When building from the base_content, decide
            you can either copy it and return a new object, or modify it in
            place.
        :return: (content, delta) A Content object and possibly a line-delta,
            delta may be None
        """
        method, noeol = record_details
        if method == 'line-delta':
            if copy_base_content:
                content = base_content.copy()
            else:
                content = base_content
            delta = self.parse_line_delta(record, version_id)
            content.apply_delta(delta, version_id)
        else:
            content = self.parse_fulltext(record, version_id)
            delta = None
        content._should_strip_eol = noeol
        return (content, delta)


class KnitAnnotateFactory(_KnitFactory):
    """Factory for creating annotated Content objects."""

    annotated = True

    def make(self, lines, version_id):
        num_lines = len(lines)
        return AnnotatedKnitContent(zip([version_id] * num_lines, lines))

    def parse_fulltext(self, content, version_id):
        """Convert fulltext to internal representation

        fulltext content is of the format
        revid(utf8) plaintext\n
        internal representation is of the format:
        (revid, plaintext)
        """
        # TODO: jam 20070209 The tests expect this to be returned as tuples,
        #       but the code itself doesn't really depend on that.
        #       Figure out a way to not require the overhead of turning the
        #       list back into tuples.
        lines = [tuple(line.split(' ', 1)) for line in content]
        return AnnotatedKnitContent(lines)

    def parse_line_delta_iter(self, lines):
        return iter(self.parse_line_delta(lines))

    def parse_line_delta(self, lines, version_id, plain=False):
        """Convert a line based delta into internal representation.

        line delta is in the form of:
        intstart intend intcount
        1..count lines:
        revid(utf8) newline\n
        internal representation is
        (start, end, count, [1..count tuples (revid, newline)])

        :param plain: If True, the lines are returned as a plain
            list without annotations, not as a list of (origin, content) tuples, i.e.
            (start, end, count, [1..count newline])
        """
        result = []
        lines = iter(lines)
        next = lines.next

        cache = {}
        def cache_and_return(line):
            origin, text = line.split(' ', 1)
            return cache.setdefault(origin, origin), text

        # walk through the lines parsing.
        # Note that the plain test is explicitly pulled out of the
        # loop to minimise any performance impact
        if plain:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [next().split(' ', 1)[1] for i in xrange(count)]
                result.append((start, end, count, contents))
        else:
            for header in lines:
                start, end, count = [int(n) for n in header.split(',')]
                contents = [tuple(next().split(' ', 1)) for i in xrange(count)]
                result.append((start, end, count, contents))
        return result

    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return (line.split(' ', 1)[1] for line in lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        lines = iter(lines)
        next = lines.next
        for header in lines:
            header = header.split(',')
            count = int(header[2])
            for i in xrange(count):
                origin, text = next().split(' ', 1)
                yield text

    def lower_fulltext(self, content):
        """convert a fulltext content record into a serializable form.

        see parse_fulltext which this inverts.
        """
        # TODO: jam 20070209 We only do the caching thing to make sure that
        #       the origin is a valid utf-8 line, eventually we could remove it
        return ['%s %s' % (o, t) for o, t in content._lines]

    def lower_line_delta(self, delta):
        """convert a delta into a serializable form.

        See parse_line_delta which this inverts.
        """
        # TODO: jam 20070209 We only do the caching thing to make sure that
        #       the origin is a valid utf-8 line, eventually we could remove it
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(origin + ' ' + text
                       for origin, text in lines)
        return out

    def annotate(self, knit, version_id):
        content = knit._get_content(version_id)
        return content.annotate()
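

# Illustrative sketch, not part of the original module: a round-trip through
# the annotated serialization formats documented above.  The helper name is
# hypothetical, and it assumes the factory methods behave as their docstrings
# describe.
def _demo_annotated_roundtrip():
    factory = KnitAnnotateFactory()
    # fulltext wire form: one 'origin text' line per content line.
    content = factory.make(['hello\n', 'world\n'], 'rev-1')
    stored = factory.lower_fulltext(content)
    # stored == ['rev-1 hello\n', 'rev-1 world\n']
    parsed = factory.parse_fulltext(stored, 'rev-1')
    # line-delta wire form: a 'start,end,count' header, then count
    # 'origin text' lines.
    delta = [(1, 2, 1, [('rev-2', 'world!\n')])]
    wire = factory.lower_line_delta(delta)
    # wire == ['1,2,1\n', 'rev-2 world!\n'], and parsing it recovers `delta`.
    return parsed.annotate(), factory.parse_line_delta(wire, 'rev-2')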


class KnitPlainFactory(_KnitFactory):
    """Factory for creating plain Content objects."""

    annotated = False

    def make(self, lines, version_id):
        return PlainKnitContent(lines, version_id)

    def parse_fulltext(self, content, version_id):
        """This parses an unannotated fulltext.

        Note that this is not a noop - the internal representation
        has (versionid, line) - it's just a constant versionid.
        """
        return self.make(content, version_id)

    def parse_line_delta_iter(self, lines, version_id):
        cur = 0
        num_lines = len(lines)
        while cur < num_lines:
            header = lines[cur]
            cur += 1
            start, end, c = [int(n) for n in header.split(',')]
            yield start, end, c, lines[cur:cur+c]
            cur += c

    def parse_line_delta(self, lines, version_id):
        return list(self.parse_line_delta_iter(lines, version_id))
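    # e.g. the plain wire form ['1,2,1\n', 'world!\n'] parses to the single
    # hunk (1, 2, 1, ['world!\n']).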

    def get_fulltext_content(self, lines):
        """Extract just the content lines from a fulltext."""
        return iter(lines)

    def get_linedelta_content(self, lines):
        """Extract just the content from a line delta.

        This doesn't return all of the extra information stored in a delta.
        Only the actual content lines.
        """
        lines = iter(lines)
        next = lines.next
        for header in lines:
            header = header.split(',')
            count = int(header[2])
            for i in xrange(count):
                yield next()

    def lower_fulltext(self, content):
        return content.text()

    def lower_line_delta(self, delta):
        out = []
        for start, end, c, lines in delta:
            out.append('%d,%d,%d\n' % (start, end, c))
            out.extend(lines)
        return out

    def annotate(self, knit, version_id):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate(version_id)


def make_empty_knit(transport, relpath):
    """Construct an empty knit at the specified location."""
    k = make_file_knit(transport, relpath, 'w', KnitPlainFactory)


def make_file_knit(name, transport, file_mode=None, access_mode='w',
    factory=None, delta=True, create=False, create_parent_dir=False,
    delay_create=False, dir_mode=None, get_scope=None):
    """Factory to create a KnitVersionedFile for a .knit/.kndx file pair."""
    if factory is None:
        factory = KnitAnnotateFactory()
    if get_scope is None:
        get_scope = lambda: None
    index = _KnitIndex(transport, name + INDEX_SUFFIX,
        access_mode, create=create, file_mode=file_mode,
        create_parent_dir=create_parent_dir, delay_create=delay_create,
        dir_mode=dir_mode, get_scope=get_scope)
    access = _KnitAccess(transport, name + DATA_SUFFIX, file_mode,
        dir_mode, ((create and not len(index)) and delay_create),
        create_parent_dir)
    return KnitVersionedFile(name, transport, factory=factory,
        create=create, delay_create=delay_create, index=index,
        access_method=access)


def get_suffixes():
    """Return the suffixes used by file based knits."""
    return [DATA_SUFFIX, INDEX_SUFFIX]
make_file_knit.get_suffixes = get_suffixes
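
# Illustrative usage sketch (hypothetical names, not part of the module):
#
#   from bzrlib.transport import get_transport
#   knit = make_file_knit('example', get_transport('.'), create=True)
#   knit.add_lines('rev-1', [], ['hello\n'])
#   knit.get_lines('rev-1')    # -> ['hello\n']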


class KnitVersionedFile(VersionedFile):
    """Weave-like structure with faster random access.

    A knit stores a number of texts and a summary of the relationships
    between them.  Texts are identified by a string version-id.  Texts
    are normally stored and retrieved as a series of lines, but can
    also be passed as single strings.

    Lines are stored with the trailing newline (if any) included, to
    avoid special cases for files with no final newline.  Lines are
    composed of 8-bit characters, not unicode.  The combination of
    these approaches should mean any 'binary' file can be safely
    stored and retrieved.
    """

    def __init__(self, relpath, transport, file_mode=None,
        factory=None, delta=True, create=False, create_parent_dir=False,
        delay_create=False, dir_mode=None, index=None, access_method=None):
        """Construct a knit at location specified by relpath.

        :param create: If not True, only open an existing knit.
        :param create_parent_dir: If True, create the parent directory if
            creating the file fails. (This is used for stores with
            hash-prefixes that may not exist yet)
        :param delay_create: The calling code is aware that the knit won't
            actually be created until the first data is stored.
        :param index: An index to use for the knit.
        """
        super(KnitVersionedFile, self).__init__()
        self.transport = transport
        self.filename = relpath
        self.factory = factory or KnitAnnotateFactory()
        self.delta = delta

        self._max_delta_chain = 200

        if None in (access_method, index):
            raise ValueError("No default access_method or index any more")
        self._index = index
        _access = access_method
        if create and not len(self) and not delay_create:
            _access.create()
        self._data = _KnitData(_access)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
            self.transport.abspath(self.filename))

    def _check_should_delta(self, first_parents):
        """Iterate back through the parent listing, looking for a fulltext.

        This is used when we want to decide whether to add a delta or a new
        fulltext. It searches for _max_delta_chain parents. When it finds a
        fulltext parent, it sees if the total size of the deltas leading up to
        it is large enough to indicate that we want a new full text anyway.

        Return True if we should create a new delta, False if we should use a
        full text.
        """
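        # Worked example with hypothetical sizes: a 1200 byte fulltext basis
        # with 300 bytes of deltas on the chain gives 1200 > 300, so another
        # delta is stored; once the chain's deltas outweigh a fresh fulltext,
        # this returns False and a fulltext is stored instead.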
        delta_size = 0
        fulltext_size = None
        delta_parents = first_parents
        for count in xrange(self._max_delta_chain):
            parent = delta_parents[0]
            method = self._index.get_method(parent)
            index, pos, size = self._index.get_position(parent)
            if method == 'fulltext':
                fulltext_size = size
                break
            delta_size += size
            delta_parents = self._index.get_parent_map([parent])[parent]
        else:
            # We couldn't find a fulltext, so we must create a new one
            return False

        return fulltext_size > delta_size

    def _check_write_ok(self):
        return self._index._check_write_ok()

    def _add_raw_records(self, records, data):
        """Add all the records 'records' with data pre-joined in 'data'.

        :param records: A list of tuples(version_id, options, parents, size).
        :param data: The data for the records. When it is written, the records
            are adjusted to have pos pointing into data by the sum of
            the preceding records sizes.
        """
        # write all the data
        raw_record_sizes = [record[3] for record in records]
        positions = self._data.add_raw_records(raw_record_sizes, data)
        index_entries = []
        for (version_id, options, parents, _), access_memo in zip(
                records, positions):
            index_entries.append((version_id, options, access_memo, parents))
        self._index.add_versions(index_entries)

    def copy_to(self, name, transport):
        """See VersionedFile.copy_to()."""
        # copy the current index to a temp index to avoid racing with local
        # writes
        transport.put_file_non_atomic(name + INDEX_SUFFIX + '.tmp',
                self.transport.get(self._index._filename))
        # copy the data file
        f = self._data._open_file()
        try:
            transport.put_file(name + DATA_SUFFIX, f)
        finally:
            f.close()
        # move the copied index into place
        transport.move(name + INDEX_SUFFIX + '.tmp', name + INDEX_SUFFIX)

    def get_data_stream(self, required_versions):
        """Get a data stream for the specified versions.

        Versions may be returned in any order, not necessarily the order
        specified.  They are returned in a partial order by compression
        parent, so that the deltas can be applied as the data stream is
        inserted; however note that compression parents will not be sent
        unless they were specifically requested, as the client may already
        have them.

        :param required_versions: The exact set of versions to be extracted.
            Unlike some other knit methods, this is not used to generate a
            transitive closure, rather it is used precisely as given.

        :returns: format_signature, list of (version, options, length, parents),
            reader_callable.
        """
        required_version_set = frozenset(required_versions)
        version_index = {}
        # list of revisions that can just be sent without waiting for their
        # compression parent
        ready_to_send = []
        # map from revision to the children based on it
        deferred = {}
        # first, read all relevant index data, enough to sort into the right
        # order to return
        for version_id in required_versions:
            options = self._index.get_options(version_id)
            parents = self._index.get_parents_with_ghosts(version_id)
            index_memo = self._index.get_position(version_id)
            version_index[version_id] = (index_memo, options, parents)
            if ('line-delta' in options
                and parents[0] in required_version_set):
                # must wait until the parent has been sent
                deferred.setdefault(parents[0], []). \
                    append(version_id)
            else:
                # either a fulltext, or a delta whose parent the client did
                # not ask for and presumably already has
                ready_to_send.append(version_id)
        # build a list of results to return, plus instructions for data to
        # read from the file
        copy_queue_records = []
        temp_version_list = []
        while ready_to_send:
            # XXX: pushing and popping lists may be a bit inefficient
            version_id = ready_to_send.pop(0)
            (index_memo, options, parents) = version_index[version_id]
            copy_queue_records.append((version_id, index_memo))
            none, data_pos, data_size = index_memo
            temp_version_list.append((version_id, options, data_size,
                parents))
            if version_id in deferred:
                # now we can send all the children of this revision - we could
                # put them in anywhere, but we hope that sending them soon
                # after the fulltext will give good locality in the receiver
                ready_to_send[:0] = deferred.pop(version_id)
        if not (len(deferred) == 0):
            raise AssertionError("Still have compressed child versions waiting to be sent")
        # XXX: The stream format is such that we cannot stream it - we have to
        # know the length of all the data a-priori.
        raw_datum = []
        result_version_list = []
        for (version_id, raw_data, _), \
            (version_id2, options, _, parents) in \
            izip(self._data.read_records_iter_raw(copy_queue_records),
                 temp_version_list):
            if not (version_id == version_id2):
                raise AssertionError('logic error, inconsistent results')
            raw_datum.append(raw_data)
            result_version_list.append(
                (version_id, options, len(raw_data), parents))
        # provide a callback to get data incrementally.
        pseudo_file = StringIO(''.join(raw_datum))
        def read(length):
            if length is None:
                return pseudo_file.read()
            else:
                return pseudo_file.read(length)
        return (self.get_format_signature(), result_version_list, read)

    def get_record_stream(self, versions, ordering, include_delta_closure):
        """Get a stream of records for versions.

        :param versions: The versions to include. Each version is a tuple
            (version,).
        :param ordering: Either 'unordered' or 'topological'. A topologically
            sorted stream has compression parents strictly before their
            children.
        :param include_delta_closure: If True then the closure across any
            compression parents will be included (in the opaque data).
        :return: An iterator of ContentFactory objects, each of which is only
            valid until the iterator is advanced.
        """
        if include_delta_closure:
            # Nb: what we should do is plan the data to stream to allow
            # reconstruction of all the texts without excessive buffering,
            # including re-sending common bases as needed. This makes the most
            # sense when we start serialising these streams though, so for now
            # we just fallback to individual text construction behind the
            # abstraction barrier.
            knit = self
        else:
            knit = None
        # We end up doing multiple index lookups here for parents details and
        # disk layout details - we need a unified api ?
        parent_map = self.get_parent_map(versions)
        absent_versions = set(versions) - set(parent_map)
        if ordering == 'topological':
            present_versions = topo_sort(parent_map)
        else:
            # List comprehension to keep the requested order (as that seems
            # marginally useful, at least until we start doing IO optimising
            # here)
            present_versions = [version for version in versions if version in
                parent_map]
        position_map = self._get_components_positions(present_versions)
        records = [(version, position_map[version][1]) for version in
            present_versions]
        for version in absent_versions:
            yield AbsentContentFactory((version,))
        for version, raw_data, sha1 in \
                self._data.read_records_iter_raw(records):
            (record_details, index_memo, _) = position_map[version]
            yield KnitContentFactory(version, parent_map[version],
                record_details, sha1, raw_data, self.factory.annotated, knit)

    def _extract_blocks(self, version_id, source, target):
        if self._index.get_method(version_id) != 'line-delta':
            return None
        parent, sha1, noeol, delta = self.get_delta(version_id)
        return KnitContent.get_line_delta_blocks(delta, source, target)

    def get_delta(self, version_id):
        """Get a delta for constructing version from some other version."""
        self.check_not_reserved_id(version_id)
        parents = self.get_parent_map([version_id])[version_id]
        if len(parents):
            parent = parents[0]
        else:
            parent = None
        index_memo = self._index.get_position(version_id)
        data, sha1 = self._data.read_records(((version_id, index_memo),))[version_id]
        noeol = 'no-eol' in self._index.get_options(version_id)
        if 'fulltext' == self._index.get_method(version_id):
            new_content = self.factory.parse_fulltext(data, version_id)
            if parent is not None:
                reference_content = self._get_content(parent)
                old_texts = reference_content.text()
            else:
                old_texts = []
            new_texts = new_content.text()
            delta_seq = patiencediff.PatienceSequenceMatcher(None, old_texts,
                new_texts)
            return parent, sha1, noeol, self._make_line_delta(delta_seq, new_content)
        else:
            delta = self.factory.parse_line_delta(data, version_id)
            return parent, sha1, noeol, delta

    def get_format_signature(self):
        """See VersionedFile.get_format_signature()."""
        if self.factory.annotated:
            annotated_part = "annotated"
        else:
            annotated_part = "plain"
        return "knit-%s" % (annotated_part,)

    def get_sha1s(self, version_ids):
        """See VersionedFile.get_sha1s()."""
        record_map = self._get_record_map(version_ids)
        # record entry 2 is the 'digest'.
        return [record_map[v][2] for v in version_ids]

    def insert_data_stream(self, (format, data_list, reader_callable)):
        """Insert knit records from a data stream into this knit.

        If a version in the stream is already present in this knit, it will not
        be inserted a second time. It will be checked for consistency with the
        stored version however, and may cause a KnitCorrupt error to be raised
        if the data in the stream disagrees with the already stored data.

        :seealso: get_data_stream
        """
        if format != self.get_format_signature():
            if 'knit' in debug.debug_flags:
                trace.mutter(
                    'incompatible format signature inserting to %r', self)
            source = self._knit_from_datastream(
                (format, data_list, reader_callable))
            stream = source.get_record_stream(source.versions(), 'unordered', False)
            self.insert_record_stream(stream)
            return

        for version_id, options, length, parents in data_list:
            if self.has_version(version_id):
                # First check: the list of parents.
                my_parents = self.get_parents_with_ghosts(version_id)
                if tuple(my_parents) != tuple(parents):
                    # XXX: KnitCorrupt is not quite the right exception here.
                    raise KnitCorrupt(
                        self.filename,
                        'parents list %r from data stream does not match '
                        'already recorded parents %r for %s'
                        % (parents, my_parents, version_id))

                # Also check the SHA-1 of the fulltext this content will
                # become.
                raw_data = reader_callable(length)
                my_fulltext_sha1 = self.get_sha1s([version_id])[0]
                df, rec = self._data._parse_record_header(version_id, raw_data)
                stream_fulltext_sha1 = rec[3]
                if my_fulltext_sha1 != stream_fulltext_sha1:
                    # Actually, we don't know if it's this knit that's corrupt,
                    # or the data stream we're trying to insert.
                    raise KnitCorrupt(
                        self.filename, 'sha-1 does not match %s' % version_id)
            else:
                if 'line-delta' in options:
                    # Make sure that this knit record is actually useful: a
                    # line-delta is no use unless we have its parent.
                    # Fetching from a broken repository with this problem
                    # shouldn't break the target repository.
                    #
                    # See https://bugs.launchpad.net/bzr/+bug/164443
                    if not self._index.has_version(parents[0]):
                        if self.delta:
                            raise KnitCorrupt(self.filename,
                                'line-delta from stream '
                                'for version %s '
                                'references '
                                'missing parent %s\n'
                                'Try running "bzr check" '
                                'on the source repository, and "bzr reconcile" '
                                'if necessary.' %
                                (version_id, parents[0]))
                        else:
                            # We received a line-delta record for a non-delta knit.
                            # Convert it to a fulltext.
                            gzip_bytes = reader_callable(length)
                            self._convert_line_delta_to_fulltext(
                                gzip_bytes, version_id, parents)
                            continue

                self._add_raw_records(
                    [(version_id, options, parents, length)],
                    reader_callable(length))

    def _convert_line_delta_to_fulltext(self, gzip_bytes, version_id, parents):
        lines, sha1 = self._data._parse_record(version_id, gzip_bytes)
        delta = self.factory.parse_line_delta(lines, version_id)
        content = self.factory.make(self.get_lines(parents[0]), parents[0])
        content.apply_delta(delta, version_id)
        digest, len, content = self.add_lines(
            version_id, parents, content.text())
        if digest != sha1:
            raise errors.VersionedFileInvalidChecksum(version_id)

    def _knit_from_datastream(self, (format, data_list, reader_callable)):
        """Create a knit object from a data stream.

        This method exists to allow conversion of data streams that do not
        match the signature of this knit. Generally it will be slower and use
        more memory to use this method to insert data, but it will work.

        :seealso: get_data_stream for details on datastreams.
        :return: A knit versioned file which can be used to join the datastream
            into self.
        """
        if format == "knit-plain":
            factory = KnitPlainFactory()
        elif format == "knit-annotated":
            factory = KnitAnnotateFactory()
        else:
            raise errors.KnitDataStreamUnknown(format)
        index = _StreamIndex(data_list, self._index)
        access = _StreamAccess(reader_callable, index, self, factory)
        return KnitVersionedFile(self.filename, self.transport,
            factory=factory, index=index, access_method=access)

    def insert_record_stream(self, stream):
        """Insert a record stream into this versioned file.

        :param stream: A stream of records to insert.
        :return: None
        :seealso VersionedFile.get_record_stream:
        """
        def get_adapter(adapter_key):
            try:
                return adapters[adapter_key]
            except KeyError:
                adapter_factory = adapter_registry.get(adapter_key)
                adapter = adapter_factory(self)
                adapters[adapter_key] = adapter
                return adapter
        if self.factory.annotated:
            # self is annotated, we need annotated knits to use directly.
            annotated = "annotated-"
            convertibles = []
        else:
            # self is not annotated, but we can strip annotations cheaply.
            annotated = ""
            convertibles = set(["knit-annotated-delta-gz",
                "knit-annotated-ft-gz"])
        # The set of types we can cheaply adapt without needing basis texts.
        native_types = set()
        native_types.add("knit-%sdelta-gz" % annotated)
        native_types.add("knit-%sft-gz" % annotated)
        knit_types = native_types.union(convertibles)
        adapters = {}
        # Buffer all index entries that we can't add immediately because their
        # basis parent is missing. We don't buffer all because generating
        # annotations may require access to some of the new records. However we
        # can't generate annotations from new deltas until their basis parent
        # is present anyway, so we get away with not needing an index that
        # includes the new keys.
        # key = basis_parent, value = index entry to add
        buffered_index_entries = {}
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key[0]], self)
            # adapt to non-tuple interface
            parents = [parent[0] for parent in record.parents]
            if record.storage_kind in knit_types:
                if record.storage_kind not in native_types:
                    try:
                        adapter_key = (record.storage_kind, "knit-delta-gz")
                        adapter = get_adapter(adapter_key)
                    except KeyError:
                        adapter_key = (record.storage_kind, "knit-ft-gz")
                        adapter = get_adapter(adapter_key)
                    bytes = adapter.get_bytes(
                        record, record.get_bytes_as(record.storage_kind))
                else:
                    bytes = record.get_bytes_as(record.storage_kind)
                options = [record._build_details[0]]
                if record._build_details[1]:
                    options.append('no-eol')
                # Just blat it across.
                # Note: This does end up adding data on duplicate keys. As
                # modern repositories use atomic insertions this should not
                # lead to excessive growth in the event of interrupted fetches.
                # 'knit' repositories may suffer excessive growth, but as a
                # deprecated format this is tolerable. It can be fixed if
                # needed by the kndx index support raising on a duplicate
                # add with identical parents and options.
                access_memo = self._data.add_raw_records([len(bytes)], bytes)[0]
                index_entry = (record.key[0], options, access_memo, parents)
                buffered = False
                if 'fulltext' not in options:
                    basis_parent = parents[0]
                    if not self.has_version(basis_parent):
                        pending = buffered_index_entries.setdefault(
                            basis_parent, [])
                        pending.append(index_entry)
                        buffered = True
                if not buffered:
                    self._index.add_versions([index_entry])
            elif record.storage_kind == 'fulltext':
                self.add_lines(record.key[0], parents,
                    split_lines(record.get_bytes_as('fulltext')))
            else:
                adapter_key = record.storage_kind, 'fulltext'
                adapter = get_adapter(adapter_key)
                lines = split_lines(adapter.get_bytes(
                    record, record.get_bytes_as(record.storage_kind)))
                try:
                    self.add_lines(record.key[0], parents, lines)
                except errors.RevisionAlreadyPresent:
                    pass
            # Add any records whose basis parent is now available.
            added_keys = [record.key[0]]
            while added_keys:
                key = added_keys.pop(0)
                if key in buffered_index_entries:
                    index_entries = buffered_index_entries[key]
                    self._index.add_versions(index_entries)
                    added_keys.extend(
                        [index_entry[0] for index_entry in index_entries])
                    del buffered_index_entries[key]
        # If there were any deltas which had a missing basis parent, error.
        if buffered_index_entries:
            raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
                self)

    def versions(self):
        """See VersionedFile.versions."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "versions scales with size of history")
        return self._index.get_versions()

    def has_version(self, version_id):
        """See VersionedFile.has_version."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(2, "has_version is a LBYL scenario")
        return self._index.has_version(version_id)

    __contains__ = has_version

    def _merge_annotations(self, content, parents, parent_texts={},
                           delta=None, annotated=None,
                           left_matching_blocks=None):
        """Merge annotations for content.  This is done by comparing
        the annotations based on changes to the text.
        """
        if left_matching_blocks is not None:
            delta_seq = diff._PrematchedMatcher(left_matching_blocks)
        else:
            delta_seq = None
        if annotated:
            for parent_id in parents:
                merge_content = self._get_content(parent_id, parent_texts)
                if (parent_id == parents[0] and delta_seq is not None):
                    seq = delta_seq
                else:
                    seq = patiencediff.PatienceSequenceMatcher(
                        None, merge_content.text(), content.text())
                for i, j, n in seq.get_matching_blocks():
                    if n == 0:
                        continue
                    # this appears to copy (origin, text) pairs across to the
                    # new content for any line that matches the last-checked
                    # parent.
                    content._lines[j:j+n] = merge_content._lines[i:i+n]
        if delta:
            if delta_seq is None:
                reference_content = self._get_content(parents[0], parent_texts)
                new_texts = content.text()
                old_texts = reference_content.text()
                delta_seq = patiencediff.PatienceSequenceMatcher(
                    None, old_texts, new_texts)
            return self._make_line_delta(delta_seq, content)

    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks

    def _get_components_positions(self, version_ids):
        """Produce a map of position data for the components of versions.

        This data is intended to be used for retrieving the knit records.

        A dict of version_id to (record_details, index_memo, next, parents) is
        returned.
        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
            data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version
        """
        component_data = {}
        pending_components = version_ids
        while pending_components:
            build_details = self._index.get_build_details(pending_components)
            current_components = set(pending_components)
            pending_components = set()
            for version_id, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                method = record_details[0]
                if compression_parent is not None:
                    pending_components.add(compression_parent)
                component_data[version_id] = (record_details, index_memo,
                                              compression_parent)
            missing = current_components.difference(build_details)
            if missing:
                raise errors.RevisionNotPresent(missing.pop(), self.filename)
        return component_data

    def _get_content(self, version_id, parent_texts={}):
        """Returns a content object that makes up the specified
        version."""
        cached_version = parent_texts.get(version_id, None)
        if cached_version is not None:
            if not self.has_version(version_id):
                raise RevisionNotPresent(version_id, self.filename)
            return cached_version

        text_map, contents_map = self._get_content_maps([version_id])
        return contents_map[version_id]

    def _check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        self._index.check_versions_present(version_ids)

    def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts,
        nostore_sha, random_id, check_content, left_matching_blocks):
        """See VersionedFile.add_lines_with_ghosts()."""
        self._check_add(version_id, lines, random_id, check_content)
        return self._add(version_id, lines, parents, self.delta,
            parent_texts, left_matching_blocks, nostore_sha, random_id)

    def _add_lines(self, version_id, parents, lines, parent_texts,
        left_matching_blocks, nostore_sha, random_id, check_content):
        """See VersionedFile.add_lines."""
        self._check_add(version_id, lines, random_id, check_content)
        self._check_versions_present(parents)
        return self._add(version_id, lines[:], parents, self.delta,
            parent_texts, left_matching_blocks, nostore_sha, random_id)

    def _check_add(self, version_id, lines, random_id, check_content):
        """check that version_id and lines are safe to add."""
        if contains_whitespace(version_id):
            raise InvalidRevisionId(version_id, self.filename)
        self.check_not_reserved_id(version_id)
        # Technically this could be avoided if we are happy to allow duplicate
        # id insertion when other things than bzr core insert texts, but it
        # seems useful for folk using the knit api directly to have some safety
        # blanket that we can disable.
        if not random_id and self.has_version(version_id):
            raise RevisionAlreadyPresent(version_id, self.filename)
        if check_content:
            self._check_lines_not_unicode(lines)
            self._check_lines_are_lines(lines)

    def _add(self, version_id, lines, parents, delta, parent_texts,
        left_matching_blocks, nostore_sha, random_id):
        """Add a set of lines on top of version specified by parents.

        If delta is true, compress the text as a line-delta against
        the first parent.

        Any versions not present will be converted into ghosts.
        """
        # first thing, if the content is something we don't need to store, find
        # out.
        line_bytes = ''.join(lines)
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent

        present_parents = []
        if parent_texts is None:
            parent_texts = {}
        for parent in parents:
            if self.has_version(parent):
                present_parents.append(parent)

        # can only compress against the left most present parent.
        if (delta and
            (len(present_parents) == 0 or
             present_parents[0] != parents[0])):
            delta = False

        text_length = len(line_bytes)
        options = []
        if lines:
            if lines[-1][-1] != '\n':
                # copy the contents of lines.
                lines = lines[:]
                options.append('no-eol')
                lines[-1] = lines[-1] + '\n'
                line_bytes += '\n'

        if delta:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas.  This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents)

        content = self.factory.make(lines, version_id)
        if delta or (self.factory.annotated and len(present_parents) > 0):
            # Merge annotations from parent texts if needed.
            delta_hunks = self._merge_annotations(content, present_parents,
                parent_texts, delta, self.factory.annotated,
                left_matching_blocks)

        if delta:
            options.append('line-delta')
            store_lines = self.factory.lower_line_delta(delta_hunks)
            size, bytes = self._data._record_to_data(version_id, digest,
                store_lines)
        else:
            options.append('fulltext')
            # isinstance is slower and we have no hierarchy.
            if self.factory.__class__ == KnitPlainFactory:
                # Use the already joined bytes saving iteration time in
                # _record_to_data.
                size, bytes = self._data._record_to_data(version_id, digest,
                    lines, [line_bytes])
            else:
                # get mixed annotation + content and feed it into the
                # serialiser.
                store_lines = self.factory.lower_fulltext(content)
                size, bytes = self._data._record_to_data(version_id, digest,
                    store_lines)

        access_memo = self._data.add_raw_records([size], bytes)[0]
        self._index.add_versions(
            ((version_id, options, access_memo, parents),),
            random_id=random_id)
        return digest, text_length, content

    def check(self, progress_bar=None):
        """See VersionedFile.check()."""
        # This doesn't actually test extraction of everything, but that will
        # impact 'bzr check' substantially, and needs to be integrated with
        # care. However, it does check for the obvious problem of a delta with
        # no basis.
        versions = self.versions()
        parent_map = self.get_parent_map(versions)
        for version in versions:
            if self._index.get_method(version) != 'fulltext':
                compression_parent = parent_map[version][0]
                if compression_parent not in parent_map:
                    raise errors.KnitCorrupt(self,
                        "Missing basis parent %s for %s" % (
                        compression_parent, version))

    def get_lines(self, version_id):
        """See VersionedFile.get_lines()."""
        return self.get_line_list([version_id])[0]

    def _get_record_map(self, version_ids):
        """Produce a dictionary of knit records.

        :return: {version_id:(record, record_details, digest, next)}
            record
                data returned from read_records
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.
        """
        position_map = self._get_components_positions(version_ids)
        # c = component_id, r = record_details, i_m = index_memo, n = next
        records = [(c, i_m) for c, (r, i_m, n)
                            in position_map.iteritems()]
        record_map = {}
        for component_id, record, digest in \
                self._data.read_records_iter(records):
            (record_details, index_memo, next) = position_map[component_id]
            record_map[component_id] = record, record_details, digest, next

        return record_map

    def get_text(self, version_id):
        """See VersionedFile.get_text"""
        return self.get_texts([version_id])[0]

    def get_texts(self, version_ids):
        return [''.join(l) for l in self.get_line_list(version_ids)]

    def get_line_list(self, version_ids):
        """Return the texts of listed versions as a list of strings."""
        for version_id in version_ids:
            self.check_not_reserved_id(version_id)
        text_map, content_map = self._get_content_maps(version_ids)
        return [text_map[v] for v in version_ids]

    _get_lf_split_line_list = get_line_list

    def _get_content_maps(self, version_ids):
        """Produce maps of text and KnitContents

        :return: (text_map, content_map) where text_map contains the texts for
            the requested versions and content_map contains the KnitContents.
            Both dicts take version_ids as their keys.
        """
        # FUTURE: This function could be improved for the 'extract many' case
        # by tracking each component and only doing the copy when the number of
        # children that need to apply deltas to it is > 1 or it is part of the
        # final output.
        version_ids = list(version_ids)
        multiple_versions = len(version_ids) != 1
        record_map = self._get_record_map(version_ids)

        text_map = {}
        content_map = {}
        final_content = {}
        for version_id in version_ids:
            components = []
            cursor = version_id
            while cursor is not None:
                record, record_details, digest, next = record_map[cursor]
                components.append((cursor, record, record_details, digest))
                if cursor in content_map:
                    break
                cursor = next

            content = None
            for (component_id, record, record_details,
                 digest) in reversed(components):
                if component_id in content_map:
                    content = content_map[component_id]
                else:
                    content, delta = self.factory.parse_record(version_id,
                        record, record_details, content,
                        copy_base_content=multiple_versions)
                    if multiple_versions:
                        content_map[component_id] = content

            content.cleanup_eol(copy_on_mutate=multiple_versions)
            final_content[version_id] = content

            # digest here is the digest from the last applied component.
            text = content.text()
            actual_sha = sha_strings(text)
            if actual_sha != digest:
                raise KnitCorrupt(self.filename,
                    '\n  sha-1 %s'
                    '\n  of reconstructed text does not match'
                    '\n  expected sha-1 %s'
                    '\n  for version %s' %
                    (actual_sha, digest, version_id))
            text_map[version_id] = text
        return text_map, final_content

    def iter_lines_added_or_present_in_versions(self, version_ids=None,
                                                pb=None):
        """See VersionedFile.iter_lines_added_or_present_in_versions()."""
        if version_ids is None:
            version_ids = self.versions()
        if pb is None:
            pb = progress.DummyProgress()
        # we don't care about inclusions, the caller cares.
        # but we need to setup a list of records to visit.
        # we need version_id, position, length
        version_id_records = []
        requested_versions = set(version_ids)
        # filter for available versions
        for version_id in requested_versions:
            if not self.has_version(version_id):
                raise RevisionNotPresent(version_id, self.filename)
        # get an in-component-order queue:
        for version_id in self.versions():
            if version_id in requested_versions:
                index_memo = self._index.get_position(version_id)
                version_id_records.append((version_id, index_memo))

        total = len(version_id_records)
        for version_idx, (version_id, data, sha_value) in \
                enumerate(self._data.read_records_iter(version_id_records)):
            pb.update('Walking content.', version_idx, total)
            method = self._index.get_method(version_id)
            if method == 'fulltext':
                line_iterator = self.factory.get_fulltext_content(data)
            elif method == 'line-delta':
                line_iterator = self.factory.get_linedelta_content(data)
            else:
                raise ValueError('invalid method %r' % (method,))
            # XXX: It might be more efficient to yield (version_id,
            # line_iterator) in the future. However for now, this is a simpler
            # change to integrate into the rest of the codebase. RBC 20071110
            for line in line_iterator:
                yield line, version_id

        pb.update('Walking content.', total, total)

    def num_versions(self):
        """See VersionedFile.num_versions()."""
        return self._index.num_versions()

    __len__ = num_versions

    def annotate(self, version_id):
        """See VersionedFile.annotate."""
        return self.factory.annotate(self, version_id)

    def get_parent_map(self, version_ids):
        """See VersionedFile.get_parent_map."""
        return self._index.get_parent_map(version_ids)

    def get_ancestry(self, versions, topo_sorted=True):
        """See VersionedFile.get_ancestry."""
        if isinstance(versions, basestring):
            versions = [versions]
        if not versions:
            return []
        return self._index.get_ancestry(versions, topo_sorted)

    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry_with_ghosts."""
        if isinstance(versions, basestring):
            versions = [versions]
        if not versions:
            return []
        return self._index.get_ancestry_with_ghosts(versions)

    def plan_merge(self, ver_a, ver_b):
        """See VersionedFile.plan_merge."""
        ancestors_b = set(self.get_ancestry(ver_b, topo_sorted=False))
        ancestors_a = set(self.get_ancestry(ver_a, topo_sorted=False))
        annotated_a = self.annotate(ver_a)
        annotated_b = self.annotate(ver_b)
        return merge._plan_annotate_merge(annotated_a, annotated_b,
                                          ancestors_a, ancestors_b)


class _KnitComponentFile(object):
    """One of the files used to implement a knit database"""

    def __init__(self, transport, filename, mode, file_mode=None,
                 create_parent_dir=False, dir_mode=None):
        self._transport = transport
        self._filename = filename
        self._mode = mode
        self._file_mode = file_mode
        self._dir_mode = dir_mode
        self._create_parent_dir = create_parent_dir
        self._need_to_create = False

    def _full_path(self):
        """Return the full path to this file."""
        return self._transport.base + self._filename

    def check_header(self, fp):
        line = fp.readline()
        if line == '':
            # An empty file can actually be treated as though the file doesn't
            # exist yet.
            raise errors.NoSuchFile(self._full_path())
        if line != self.HEADER:
            raise KnitHeaderError(badline=line,
                filename=self._transport.abspath(self._filename))

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._filename)


class _KnitIndex(_KnitComponentFile):
    """Manages knit index file.

    The index is kept in memory and read on startup, to enable
    fast lookups of revision information.  The cursor of the index
    file is always pointing to the end, making it easy to append
    entries.

    _cache is a cache for fast mapping from version id to a Index
    object.

    _history is a cache for fast mapping from indexes to version ids.

    The index data format is dictionary compressed when it comes to
    parent references; an index entry may only have parents with a
    lower index number.  As a result, the index is topologically sorted.

    Duplicate entries may be written to the index for a single version id
    if this is done then the latter one completely replaces the former:
    this allows updates to correct version and parent information.
    Note that the two entries may share the delta, and that successive
    annotations and references MUST point to the first entry.

    The index file on disc contains a header, followed by one line per knit
    record. The same revision can be present in an index file more than once.
    The first occurrence gets assigned a sequence number starting from 0.

    The format of a single line is
    REVISION_ID FLAGS BYTE_OFFSET LENGTH( PARENT_ID|PARENT_SEQUENCE_ID)* :\n
    REVISION_ID is a utf8-encoded revision id
    FLAGS is a comma separated list of flags about the record. Values include
        no-eol, line-delta, fulltext.
    BYTE_OFFSET is the ascii representation of the byte offset in the data file
        that the compressed data starts at.
    LENGTH is the ascii representation of the length of the data file.
    PARENT_ID a utf-8 revision id prefixed by a '.' that is a parent of
        REVISION_ID.
    PARENT_SEQUENCE_ID the ascii representation of the sequence number of a
        revision id already in the knit that is a parent of REVISION_ID.
    The ' :' marker is the end of record marker.

    partial writes:
    when a write is interrupted to the index file, it will result in a line
    that does not end in ' :'. If the ' :' is not present at the end of a line,
    or at the end of the file, then the record that is missing it will be
    ignored by the parser.

    When writing new records to the index file, the data is preceded by '\n'
    to ensure that records always start on new lines even if the last write was
    interrupted. As a result it's normal for the last line in the index to be
    missing a trailing newline. One can be added with no harmful effects.
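
    For instance, the made-up record line::

        rev-2 line-delta 81 74 0 .ghost-rev :

    describes a line-delta for rev-2 stored at byte offset 81 with length 74,
    whose first parent is the revision at sequence number 0 in this index and
    whose second parent is 'ghost-rev', given by full id because it is not
    itself present in the index.
    """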

    HEADER = "# bzr knit index 8\n"

    # speed of knit parsing was unchanged (280 ms) by the slots addition,
    # so it is left disabled.
    # __slots__ = ['_cache', '_history', '_transport', '_filename']

    def _cache_version(self, version_id, options, pos, size, parents):
        """Cache a version record in the history array and index cache.

        This is inlined into _load_data for performance. KEEP IN SYNC.
        (It saves 60ms, 25% of the __init__ overhead on local 4000 record
        indexes).
        """
        # only want the _history index to reference the 1st index entry
        # for version_id
        if version_id not in self._cache:
            index = len(self._history)
            self._history.append(version_id)
        else:
            index = self._cache[version_id][5]
        self._cache[version_id] = (version_id,
                                   options,
                                   pos,
                                   size,
                                   parents,
                                   index)

    def _check_write_ok(self):
        if self._get_scope() != self._scope:
            raise errors.OutSideTransaction()
        if self._mode != 'w':
            raise errors.ReadOnlyObjectDirtiedError(self)

    def __init__(self, transport, filename, mode, create=False, file_mode=None,
                 create_parent_dir=False, delay_create=False, dir_mode=None,
                 get_scope=None):
        _KnitComponentFile.__init__(self, transport, filename, mode,
                                    file_mode=file_mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)
        self._cache = {}
        # position in _history is the 'official' index for a revision
        # but the values may have come from a newer entry.
        # so - wc -l of a knit index is != the number of unique names
        # in the knit.
        self._history = []
        try:
            fp = self._transport.get(self._filename)
            try:
                # _load_data may raise NoSuchFile if the target knit is
                # completely empty.
                _load_data(self, fp)
            finally:
                fp.close()
        except NoSuchFile:
            if mode != 'w' or not create:
                raise
            elif delay_create:
                self._need_to_create = True
            else:
                self._transport.put_bytes_non_atomic(
                    self._filename, self.HEADER, mode=self._file_mode)
        self._scope = get_scope()
        self._get_scope = get_scope

    def get_ancestry(self, versions, topo_sorted=True):
        """See VersionedFile.get_ancestry."""
        # get a graph of all the mentioned versions:
        graph = {}
        pending = set(versions)
        cache = self._cache
        while pending:
            version = pending.pop()
            # trim ghosts
            try:
                parents = [p for p in cache[version][4] if p in cache]
            except KeyError:
                raise RevisionNotPresent(version, self._filename)
            # if not completed and not a ghost
            pending.update([p for p in parents if p not in graph])
            graph[version] = parents
        if not topo_sorted:
            return graph.keys()
        return topo_sort(graph.items())

    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry_with_ghosts."""
        # get a graph of all the mentioned versions:
        self.check_versions_present(versions)
        graph = {}
        cache = self._cache
        pending = set(versions)
        while pending:
            version = pending.pop()
            try:
                parents = cache[version][4]
            except KeyError:
                # ghost, fake it
                graph[version] = []
            else:
                # if not completed
                pending.update([p for p in parents if p not in graph])
                graph[version] = parents
        return topo_sort(graph.items())

    def get_build_details(self, version_ids):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param version_ids: An iterable of version_ids.
        :return: A dict of version_id:(index_memo, compression_parent,
                                       parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        result = {}
        for version_id in version_ids:
            if version_id not in self._cache:
                # ghosts are omitted
                continue
            method = self.get_method(version_id)
            parents = self.get_parents_with_ghosts(version_id)
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parents[0]
            noeol = 'no-eol' in self.get_options(version_id)
            index_memo = self.get_position(version_id)
            result[version_id] = (index_memo, compression_parent,
                                  parents, (method, noeol))
        return result

    def num_versions(self):
        return len(self._history)

    __len__ = num_versions

    def get_versions(self):
        """Get all the versions in the file. Not topologically sorted."""
        return self._history

    def _version_list_to_index(self, versions):
        result_list = []
        cache = self._cache
        for version in versions:
            if version in cache:
                # -- inlined lookup() --
                result_list.append(str(cache[version][5]))
                # -- end lookup () --
            else:
                result_list.append('.' + version)
        return ' '.join(result_list)
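
    # Illustrative only (not from the original source): if the cache holds
    # 'rev-a' at sequence number 0 and 'rev-b' at 3, then
    # _version_list_to_index(['rev-a', 'rev-b', 'rev-c']) returns the
    # dictionary-compressed string '0 3 .rev-c' - parents present in the
    # index collapse to their sequence numbers, absent ones keep the
    # '.'-prefixed revision id.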

    def add_version(self, version_id, options, index_memo, parents):
        """Add a version record to the index."""
        self.add_versions(((version_id, options, index_memo, parents),))

    def add_versions(self, versions, random_id=False):
        """Add multiple versions to the index.

        :param versions: a list of tuples:
                         (version_id, options, pos, size, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        lines = []
        orig_history = self._history[:]
        orig_cache = self._cache.copy()

        try:
            for version_id, options, (index, pos, size), parents in versions:
                line = "\n%s %s %s %s %s :" % (version_id,
                                               ','.join(options),
                                               pos,
                                               size,
                                               self._version_list_to_index(parents))
                lines.append(line)
                self._cache_version(version_id, options, pos, size, tuple(parents))
            if not self._need_to_create:
                self._transport.append_bytes(self._filename, ''.join(lines))
            else:
                sio = StringIO()
                sio.write(self.HEADER)
                sio.writelines(lines)
                sio.seek(0)
                self._transport.put_file_non_atomic(self._filename, sio,
                                    create_parent_dir=self._create_parent_dir,
                                    mode=self._file_mode,
                                    dir_mode=self._dir_mode)
                self._need_to_create = False
        except:
            # If any problems happen, restore the original values and re-raise
            self._history = orig_history
            self._cache = orig_cache
            raise

    def has_version(self, version_id):
        """True if the version is in the index."""
        return version_id in self._cache

    def get_position(self, version_id):
        """Return details needed to access the version.

        .kndx indices do not support split-out data, so return None for the
        index field.

        :return: a tuple (None, data position, size) to hand to the access
            logic to get the record.
        """
        entry = self._cache[version_id]
        return None, entry[2], entry[3]

    def get_method(self, version_id):
        """Return compression method of specified version."""
        try:
            options = self._cache[version_id][1]
        except KeyError:
            raise RevisionNotPresent(version_id, self._filename)
        if 'fulltext' in options:
            return 'fulltext'
        else:
            if 'line-delta' not in options:
                raise errors.KnitIndexUnknownMethod(self._full_path(), options)
            return 'line-delta'

    def get_options(self, version_id):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        return self._cache[version_id][1]

    def get_parent_map(self, version_ids):
        """Passed through by KnitVersionedFile.get_parent_map."""
        result = {}
        for version_id in version_ids:
            try:
                result[version_id] = tuple(self._cache[version_id][4])
            except KeyError:
                pass
        return result

    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        try:
            return self.get_parent_map([version_id])[version_id]
        except KeyError:
            raise RevisionNotPresent(version_id, self)

    def check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        cache = self._cache
        for version_id in version_ids:
            if version_id not in cache:
                raise RevisionNotPresent(version_id, self._filename)


class KnitGraphIndex(object):
    """A knit index that builds on GraphIndex."""

    def __init__(self, graph_index, deltas=False, parents=True, add_callback=None):
        """Construct a KnitGraphIndex on a graph_index.

        :param graph_index: An implementation of bzrlib.index.GraphIndex.
        :param deltas: Allow delta-compressed records.
        :param add_callback: If not None, allow additions to the index and call
            this callback with a list of added GraphIndex nodes:
            [(node, value, node_refs), ...]
        :param parents: If True, record knits' parents; if not, do not record
            parents.
        """
        self._graph_index = graph_index
        self._deltas = deltas
        self._add_callback = add_callback
        self._parents = parents
        if deltas and not parents:
            raise KnitCorrupt(self, "Cannot do delta compression without "
                "parent tracking.")

    def _check_write_ok(self):
        pass

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index keys, - 1-tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)

    def _present_keys(self, version_ids):
        return set([
            node[1] for node in self._get_entries(version_ids)])

    def _parentless_ancestry(self, versions):
        """Honour the get_ancestry API for parentless knit indices."""
        wanted_keys = self._version_ids_to_keys(versions)
        present_keys = self._present_keys(wanted_keys)
        missing = set(wanted_keys).difference(present_keys)
        if missing:
            raise RevisionNotPresent(missing.pop(), self)
        return list(self._keys_to_version_ids(present_keys))

    def get_ancestry(self, versions, topo_sorted=True):
        """See VersionedFile.get_ancestry."""
        if not self._parents:
            return self._parentless_ancestry(versions)
        # XXX: This will do len(history) index calls - perhaps
        # it should be altered to be an index core feature?
        # get a graph of all the mentioned versions:
        graph = {}
        ghosts = set()
        versions = self._version_ids_to_keys(versions)
        pending = set(versions)
        while pending:
            # get all pending nodes
            this_iteration = pending
            new_nodes = self._get_entries(this_iteration)
            found = set()
            pending = set()
            for (index, key, value, node_refs) in new_nodes:
                # don't ask for ghosts - otherwise
                # we can end up looping with pending
                # being entirely ghosted.
                graph[key] = [parent for parent in node_refs[0]
                    if parent not in ghosts]
                # queue parents
                for parent in graph[key]:
                    # don't examine known nodes again
                    if parent in graph:
                        continue
                    pending.add(parent)
                found.add(key)
            ghosts.update(this_iteration.difference(found))
        if versions.difference(graph):
            raise RevisionNotPresent(versions.difference(graph).pop(), self)
        if topo_sorted:
            result_keys = topo_sort(graph.items())
        else:
            result_keys = graph.iterkeys()
        return [key[0] for key in result_keys]

    def get_ancestry_with_ghosts(self, versions):
        """See VersionedFile.get_ancestry_with_ghosts."""
        if not self._parents:
            return self._parentless_ancestry(versions)
        # XXX: This will do len(history) index calls - perhaps
        # it should be altered to be an index core feature?
        # get a graph of all the mentioned versions:
        graph = {}
        versions = self._version_ids_to_keys(versions)
        pending = set(versions)
        while pending:
            # get all pending nodes
            this_iteration = pending
            new_nodes = self._get_entries(this_iteration)
            pending = set()
            for (index, key, value, node_refs) in new_nodes:
                graph[key] = node_refs[0]
                # queue parents
                for parent in graph[key]:
                    # don't examine known nodes again
                    if parent in graph:
                        continue
                    pending.add(parent)
            missing_versions = this_iteration.difference(graph)
            missing_needed = versions.intersection(missing_versions)
            if missing_needed:
                raise RevisionNotPresent(missing_needed.pop(), self)
            for missing_version in missing_versions:
                # add a key, no parents
                graph[missing_version] = []
                pending.discard(missing_version) # don't look for it
        result_keys = topo_sort(graph.items())
        return [key[0] for key in result_keys]

    def get_build_details(self, version_ids):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param version_ids: An iterable of version_ids.
        :return: A dict of version_id:(index_memo, compression_parent,
                                       parents, record_details).
            index_memo
                opaque structure to pass to read_records to extract the raw
                data
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        result = {}
        entries = self._get_entries(self._version_ids_to_keys(version_ids), True)
        for entry in entries:
            version_id = self._keys_to_version_ids((entry[1],))[0]
            if not self._parents:
                parents = ()
            else:
                parents = self._keys_to_version_ids(entry[3][0])
            if not self._deltas:
                compression_parent = None
            else:
                compression_parent_key = self._compression_parent(entry)
                if compression_parent_key:
                    compression_parent = self._keys_to_version_ids(
                        (compression_parent_key,))[0]
                else:
                    compression_parent = None
            noeol = (entry[2][0] == 'N')
            if compression_parent:
                method = 'line-delta'
            else:
                method = 'fulltext'
            result[version_id] = (self._node_to_position(entry),
                                  compression_parent, parents,
                                  (method, noeol))
        return result

    def _compression_parent(self, an_entry):
        # return the key that an_entry is compressed against, or None
        # Grab the second parent list (as deltas implies parents currently)
        compression_parents = an_entry[3][1]
        if not compression_parents:
            return None
        return compression_parents[0]

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'

    def num_versions(self):
        return len(list(self._graph_index.iter_all_entries()))

    __len__ = num_versions

    def get_versions(self):
        """Get all the versions in the file. Not topologically sorted."""
        return [node[1][0] for node in self._graph_index.iter_all_entries()]

    def has_version(self, version_id):
        """True if the version is in the index."""
        return len(self._present_keys(self._version_ids_to_keys([version_id]))) == 1

    def _keys_to_version_ids(self, keys):
        return tuple(key[0] for key in keys)

    def get_position(self, version_id):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(version_id)
        return self._node_to_position(node)

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
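
    # Illustrative only (not from the original source): a node whose value
    # string is 'N53 412' parses as (node[0], 53, 412) - the leading byte is
    # the no-eol flag ('N' set, ' ' clear) and is skipped by node[2][1:],
    # leaving the byte offset and length of the record.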

    def get_method(self, version_id):
        """Return compression method of specified version."""
        return self._get_method(self._get_node(version_id))

    def _get_node(self, version_id):
        try:
            return list(self._get_entries(self._version_ids_to_keys([version_id])))[0]
        except IndexError:
            raise RevisionNotPresent(version_id, self)

    def get_options(self, version_id):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(version_id)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options

    def get_parent_map(self, version_ids):
        """Passed through by KnitVersionedFile.get_parent_map."""
        nodes = self._get_entries(self._version_ids_to_keys(version_ids))
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1][0]] = self._keys_to_version_ids(node[3][0])
        else:
            for node in nodes:
                result[node[1][0]] = ()
        return result

    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        try:
            return self.get_parent_map([version_id])[version_id]
        except KeyError:
            raise RevisionNotPresent(version_id, self)

    def check_versions_present(self, version_ids):
        """Check that all specified versions are present."""
        keys = self._version_ids_to_keys(version_ids)
        present = self._present_keys(keys)
        missing = keys.difference(present)
        if missing:
            raise RevisionNotPresent(missing.pop(), self)

    def add_version(self, version_id, options, access_memo, parents):
        """Add a version record to the index."""
        return self.add_versions(((version_id, options, access_memo, parents),))

    def add_versions(self, versions, random_id=False):
        """Add multiple versions to the index.

        This function does not insert data into the Immutable GraphIndex
        backing the KnitGraphIndex, instead it prepares data for insertion by
        the caller and checks that it is safe to insert then calls
        self._add_callback with the prepared GraphIndex nodes.

        :param versions: a list of tuples:
                         (version_id, options, pos, size, parents).
        :param random_id: If True the ids being added were randomly generated
            and no check for existence will be performed.
        """
        if not self._add_callback:
            raise errors.ReadOnlyError(self)
        # we hope there are no repositories with inconsistent parentage
        # anywhere. such repositories are not supported.
        keys = {}
        for (version_id, options, access_memo, parents) in versions:
            index, pos, size = access_memo
            key = (version_id, )
            parents = tuple((parent, ) for parent in parents)
            if 'no-eol' in options:
                value = 'N'
            else:
                value = ' '
            value += "%d %d" % (pos, size)
            if not self._deltas:
                if 'line-delta' in options:
                    raise KnitCorrupt(self, "attempt to add line-delta in non-delta knit")
            if self._parents:
                if self._deltas:
                    if 'line-delta' in options:
                        node_refs = (parents, (parents[0],))
                    else:
                        node_refs = (parents, ())
                else:
                    node_refs = (parents, )
            else:
                if parents:
                    raise KnitCorrupt(self, "attempt to add node with parents "
                        "in parentless index.")
                node_refs = ()
            keys[key] = (value, node_refs)
        if not random_id:
            present_nodes = self._get_entries(keys)
            for (index, key, value, node_refs) in present_nodes:
                if (value, node_refs) != keys[key]:
                    raise KnitCorrupt(self, "inconsistent details in add_versions"
                        ": %s %s" % ((value, node_refs), keys[key]))
                del keys[key]
        result = []
        if self._parents:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value, node_refs))
        else:
            for key, (value, node_refs) in keys.iteritems():
                result.append((key, value))
        self._add_callback(result)

    def _version_ids_to_keys(self, version_ids):
        return set((version_id, ) for version_id in version_ids)
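
    # A sketch of the node shapes prepared by add_versions, for illustration
    # only: with deltas and parents enabled, a no-eol line-delta entry at
    # position 120 with length 75 becomes
    #   (('rev-2',), 'N120 75', ((('rev-1',),), (('rev-1',),)))
    # i.e. (key, value, (parent keys, compression parent key)); a parentless
    # index reduces this to (key, value).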


class _KnitAccess(object):
    """Access to knit records in a .knit file."""

    def __init__(self, transport, filename, _file_mode, _dir_mode,
                 _need_to_create, _create_parent_dir):
        """Create a _KnitAccess for accessing and inserting data.

        :param transport: The transport the .knit is located on.
        :param filename: The filename of the .knit.
        """
        self._transport = transport
        self._filename = filename
        self._file_mode = _file_mode
        self._dir_mode = _dir_mode
        self._need_to_create = _need_to_create
        self._create_parent_dir = _create_parent_dir

    def add_raw_records(self, sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to wherever the access method is storing data.

        :param sizes: An iterable containing the size of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is a
            tuple - (index, pos, length), where the index field is always None
            for the .knit access method.
        """
        if not self._need_to_create:
            base = self._transport.append_bytes(self._filename, raw_data)
        else:
            self._transport.put_bytes_non_atomic(self._filename, raw_data,
                                   create_parent_dir=self._create_parent_dir,
                                   mode=self._file_mode,
                                   dir_mode=self._dir_mode)
            self._need_to_create = False
            base = 0
        result = []
        for size in sizes:
            result.append((None, base, size))
            base += size
        return result

    def create(self):
        """IFF this data access has its own storage area, initialise it.

        :return: None.
        """
        self._transport.put_bytes_non_atomic(self._filename, '',
                                             mode=self._file_mode)

    def open_file(self):
        """IFF this data access can be represented as a single file, open it.

        For knits that are not mapped to a single file on disk this will
        always return None.

        :return: None or a file handle.
        """
        try:
            return self._transport.get(self._filename)
        except NoSuchFile:
            pass
        return None

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The .knit method ignores
            the index as there is always only a single file.
        :return: An iterator over the bytes of the records.
        """
        read_vector = [(pos, size) for (index, pos, size) in memos_for_retrieval]
        for pos, data in self._transport.readv(self._filename, read_vector):
            yield data


class _PackAccess(object):
    """Access to knit records via a collection of packs."""

    def __init__(self, index_to_packs, writer=None):
        """Create a _PackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param writer: A tuple (pack.ContainerWriter, write_index) which
            contains the pack to write, and the index that reads from it will
            be associated with.
        """
        if writer:
            self.container_writer = writer[0]
            self.write_index = writer[1]
        else:
            self.container_writer = None
            self.write_index = None
        self.indices = index_to_packs

    def add_raw_records(self, sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param sizes: An iterable containing the size of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is a
            tuple - (index, pos, length), where the index field is the
            write_index object supplied to the PackAccess object.
        """
        result = []
        offset = 0
        for size in sizes:
            p_offset, p_length = self.container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self.write_index, p_offset, p_length))
        return result

    def create(self):
        """Pack based knits do not get individually created."""

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            transport, path = self.indices[index]
            reader = pack.make_readv_reader(transport, path, offsets)
            for names, read_func in reader.iter_records():
                yield read_func(None)
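
    # Illustrative only: given memos [(idx_a, 0, 10), (idx_a, 10, 20),
    # (idx_b, 5, 15)], the first pass above yields request_lists of
    # [(idx_a, [(0, 10), (10, 20)]), (idx_b, [(5, 15)])], so each pack
    # file is read with a single readv-style request.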

    def open_file(self):
        """Pack based knits have no single file."""
        return None

    def set_writer(self, writer, index, (transport, packname)):
        """Set a writer to use for adding data."""
        if index is not None:
            self.indices[index] = (transport, packname)
        self.container_writer = writer
        self.write_index = index


class _StreamAccess(object):
    """A Knit Access object that provides data from a datastream.

    It also provides a fallback that presents annotated data from a *backing*
    access object as unannotated data.

    This is triggered by an index_memo which is pointing to a different index
    than this was constructed with, and is used to allow extracting full
    unannotated texts for insertion into annotated knits.
    """

    def __init__(self, reader_callable, stream_index, backing_knit,
                 orig_factory):
        """Create a _StreamAccess object.

        :param reader_callable: The reader_callable from the datastream.
            This is called to buffer all the data immediately, for
            random access.
        :param stream_index: The index the data stream this provides access to
            which will be present in native index_memo's.
        :param backing_knit: The knit object that will provide access to
            annotated texts which are not available in the stream, so as to
            create unannotated texts.
        :param orig_factory: The original content factory used to generate the
            stream. This is used for checking whether the thunk code for
            supporting _copy_texts will generate the correct form of data.
        """
        self.data = reader_callable(None)
        self.stream_index = stream_index
        self.backing_knit = backing_knit
        self.orig_factory = orig_factory

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable of memos from the
            _StreamIndex object identifying bytes to read; for these classes
            they are (from_backing_knit, index, start, end) and can point to
            either the backing knit or streamed data.
        :return: An iterator yielding a byte string for each record in
            memos_for_retrieval.
        """
        # use a generator for memory friendliness
        for from_backing_knit, version_id, start, end in memos_for_retrieval:
            if not from_backing_knit:
                if version_id is not self.stream_index:
                    raise AssertionError()
                yield self.data[start:end]
                continue
            # we have been asked to thunk. This thunking only occurs when
            # we are obtaining plain texts from an annotated backing knit
            # so that _copy_texts will work.
            # We could improve performance here by scanning for where we need
            # to do this and using get_line_list, then interleaving the output
            # as desired. However, for now, this is sufficient.
            if self.orig_factory.__class__ != KnitPlainFactory:
                raise errors.KnitCorrupt(
                    self, 'Bad thunk request %r cannot be backed by %r' %
                        (version_id, self.orig_factory))
            lines = self.backing_knit.get_lines(version_id)
            line_bytes = ''.join(lines)
            digest = sha_string(line_bytes)
            # the packed form of the fulltext always has a trailing newline,
            # even if the actual text does not, unless the file is empty. the
            # record options including the noeol flag are passed through by
            # _StreamIndex, so this is safe.
            if lines:
                if lines[-1][-1] != '\n':
                    lines[-1] = lines[-1] + '\n'
                    line_bytes += '\n'
            # We want plain data, because we expect to thunk only to allow text
            # extraction.
            size, bytes = self.backing_knit._data._record_to_data(version_id,
                digest, lines, line_bytes)
            yield bytes


class _StreamIndex(object):
    """A Knit Index object that uses the data map from a datastream."""

    def __init__(self, data_list, backing_index):
        """Create a _StreamIndex object.

        :param data_list: The data_list from the datastream.
        :param backing_index: The index which will supply values for nodes
            referenced outside of this stream.
        """
        self.data_list = data_list
        self.backing_index = backing_index
        self._by_version = {}
        pos = 0
        for key, options, length, parents in data_list:
            self._by_version[key] = options, (pos, pos + length), parents
            pos += length
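
    # For illustration (not from the original source): a data_list of
    #   [('rev-1', ['fulltext'], 100, []),
    #    ('rev-2', ['line-delta'], 60, ['rev-1'])]
    # produces _by_version slices of (0, 100) for 'rev-1' and (100, 160)
    # for 'rev-2', i.e. cumulative offsets into the buffered stream bytes.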

    def get_ancestry(self, versions, topo_sorted):
        """Get an ancestry list for versions."""
        if topo_sorted:
            # Not needed for basic joins
            raise NotImplementedError(self.get_ancestry)
        # get a graph of all the mentioned versions:
        # Little ugly - basically copied from KnitIndex, but don't want to
        # accidentally incorporate too much of that index's code.
        ancestry = set()
        pending = set(versions)
        cache = self._by_version
        while pending:
            version = pending.pop()
            # trim ghosts
            try:
                parents = [p for p in cache[version][2] if p in cache]
            except KeyError:
                raise RevisionNotPresent(version, self)
            # if not completed and not a ghost
            pending.update([p for p in parents if p not in ancestry])
            ancestry.add(version)
        return list(ancestry)

    def get_build_details(self, version_ids):
        """Get the method, index_memo and compression parent for version_ids.

        Ghosts are omitted from the result.

        :param version_ids: An iterable of version_ids.
        :return: A dict of version_id:(index_memo, compression_parent,
                                       parents, record_details).
            index_memo
                opaque memo that can be passed to _StreamAccess.read_records
                to extract the raw data; for these classes it is
                (from_backing_knit, index, start, end)
            compression_parent
                Content that this record is built upon, may be None
            parents
                Logical parents of this node
            record_details
                extra information about the content which needs to be passed to
                Factory.parse_record
        """
        result = {}
        for version_id in version_ids:
            try:
                method = self.get_method(version_id)
            except errors.RevisionNotPresent:
                # ghosts are omitted
                continue
            parent_ids = self.get_parents_with_ghosts(version_id)
            noeol = ('no-eol' in self.get_options(version_id))
            index_memo = self.get_position(version_id)
            from_backing_knit = index_memo[0]
            if from_backing_knit:
                # texts retrieved from the backing knit are always full texts
                method = 'fulltext'
            if method == 'fulltext':
                compression_parent = None
            else:
                compression_parent = parent_ids[0]
            result[version_id] = (index_memo, compression_parent,
                                  parent_ids, (method, noeol))
        return result

    def get_method(self, version_id):
        """Return compression method of specified version."""
        options = self.get_options(version_id)
        if 'fulltext' in options:
            return 'fulltext'
        elif 'line-delta' in options:
            return 'line-delta'
        else:
            raise errors.KnitIndexUnknownMethod(self, options)

    def get_options(self, version_id):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        try:
            return self._by_version[version_id][0]
        except KeyError:
            options = list(self.backing_index.get_options(version_id))
            if 'fulltext' in options:
                pass
            elif 'line-delta' in options:
                # Texts from the backing knit are always returned from the stream
                # as full texts.
                options.remove('line-delta')
                options.append('fulltext')
            else:
                raise errors.KnitIndexUnknownMethod(self, options)
            return tuple(options)

    def get_parent_map(self, version_ids):
        """Passed through by KnitVersionedFile.get_parent_map."""
        result = {}
        pending_ids = set()
        for version_id in version_ids:
            try:
                result[version_id] = self._by_version[version_id][2]
            except KeyError:
                pending_ids.add(version_id)
        result.update(self.backing_index.get_parent_map(pending_ids))
        return result

    def get_parents_with_ghosts(self, version_id):
        """Return parents of specified version with ghosts."""
        try:
            return self.get_parent_map([version_id])[version_id]
        except KeyError:
            raise RevisionNotPresent(version_id, self)

    def get_position(self, version_id):
        """Return details needed to access the version.

        _StreamAccess has the data as a big array, so we return slice
        coordinates into that (as index_memo's are opaque outside the
        index and matching access class).

        :return: a tuple (from_backing_knit, index, start, end) that can
            be passed e.g. to get_raw_records.
            If from_backing_knit is False, index will be self, otherwise it
            will be a version id.
        """
        try:
            start, end = self._by_version[version_id][1]
            return False, self, start, end
        except KeyError:
            # Signal to the access object to handle this from the backing knit.
            return (True, version_id, None, None)

    def get_versions(self):
        """Get all the versions in the stream."""
        return self._by_version.keys()


class _KnitData(object):
    """Manage extraction of data from a KnitAccess, caching and decompressing.

    The KnitData class provides the logic for parsing and using knit records,
    making use of an access method for the low level read and write operations.
    """

    def __init__(self, access):
        """Create a KnitData object.

        :param access: The access method to use. Access methods such as
            _KnitAccess manage the insertion of raw records and the subsequent
            retrieval of the same.
        """
        self._access = access
        self._checked = False

    def _open_file(self):
        return self._access.open_file()

    def _record_to_data(self, version_id, digest, lines, dense_lines=None):
        """Convert version_id, digest, lines into a raw data block.

        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000's lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a bytestring containing the compressed record).
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (version_id,
                                     len(lines),
                                     digest)],
            dense_lines or lines,
            ["end %s\n" % version_id]))
        compressed_bytes = bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
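
    # Illustrative only: for version_id 'rev-1' with two lines, the
    # uncompressed block built above looks like
    #   version rev-1 2 <sha1-of-expanded-text>
    #   <line 1>
    #   <line 2>
    #   end rev-1
    # and the gzipped form of exactly these bytes is what gets stored in
    # the data area.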

    def add_raw_records(self, sizes, raw_data):
        """Append a prepared record to the data file.

        :param sizes: An iterable containing the size of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: a list of index data for the way the data was stored.
            See the access method add_raw_records documentation for more
            details.
        """
        return self._access.add_raw_records(sizes, raw_data)

    def _parse_record_header(self, version_id, raw_data):
        """Parse a record header for consistency.

        :return: the header and the decompressor stream,
            as (stream, header_record)
        """
        df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
        try:
            rec = self._check_header(version_id, df.readline())
        except Exception, e:
            raise KnitCorrupt(self._access,
                              "While reading {%s} got %s(%s)"
                              % (version_id, e.__class__.__name__, str(e)))
        return df, rec

    def _split_header(self, line):
        rec = line.split()
        if len(rec) != 4:
            raise KnitCorrupt(self._access,
                              'unexpected number of elements in record header')
        return rec

    def _check_header_version(self, rec, version_id):
        if rec[1] != version_id:
            raise KnitCorrupt(self._access,
                              'unexpected version, wanted %r, got %r'
                              % (version_id, rec[1]))

    def _check_header(self, version_id, line):
        rec = self._split_header(line)
        self._check_header_version(rec, version_id)
        return rec

    def _parse_record_unchecked(self, data):
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self._access, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self._access,
                              'incorrect number of lines %s != %s'
                              ' for version {%s}'
                              % (len(record_contents), int(rec[2]),
                                 rec[1]))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self._access,
                              'unexpected version end line %r, wanted %r'
                              % (last_line, rec[1]))
        df.close()
        return rec, record_contents

    def _parse_record(self, version_id, data):
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]

    def read_records_iter_raw(self, records):
        """Read text records from data file and yield raw data.

        This unpacks enough of the text record to validate the id is
        as expected but that's all.

        Each item the iterator yields is (version_id, bytes,
            sha1_of_full_text).
        """
        # setup an iterator of the external records:
        # uses readv so nice and fast we hope.
        if len(records):
            # grab the disk data needed.
            needed_offsets = [index_memo for version_id, index_memo
                                         in records]
            raw_records = self._access.get_raw_records(needed_offsets)

        for version_id, index_memo in records:
            data = raw_records.next()
            # validate the header
            df, rec = self._parse_record_header(version_id, data)
            df.close()
            yield version_id, data, rec[3]

    def read_records_iter(self, records):
        """Read text records from data file and yield result.

        The result will be returned in whatever is the fastest to read,
        not by the order requested. Also, multiple requests for the same
        record will only yield 1 response.

        :param records: A list of (version_id, pos, len) entries
        :return: Yields (version_id, contents, digest) in the order
                 read, not the order requested
        """
        needed_records = sorted(set(records), key=operator.itemgetter(1))
        if not needed_records:
            return

        # The transport optimizes the fetching as well
        # (ie, reads continuous ranges.)
        raw_data = self._access.get_raw_records(
            [index_memo for version_id, index_memo in needed_records])

        for (version_id, index_memo), data in \
                izip(iter(needed_records), raw_data):
            content, digest = self._parse_record(version_id, data)
            yield version_id, content, digest

    def read_records(self, records):
        """Read records into a dictionary."""
        components = {}
        for record_id, content, digest in \
                self.read_records_iter(records):
            components[record_id] = (content, digest)
        return components


class InterKnit(InterVersionedFile):
    """Optimised code paths for knit to knit operations."""

    _matching_file_from_factory = staticmethod(make_file_knit)
    _matching_file_to_factory = staticmethod(make_file_knit)

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with knits."""
        try:
            return (isinstance(source, KnitVersionedFile) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def _copy_texts(self, pb, msg, version_ids, ignore_missing=False):
        """Copy texts to the target by extracting and adding them one by one.

        See join() for the parameter definitions.
        """
        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        # --- the below is factorable out with VersionedFile.join, but wait for
        # VersionedFiles, it may all be simpler then.
        graph = Graph(self.source)
        search = graph._make_breadth_first_searcher(version_ids)
        transitive_ids = set()
        map(transitive_ids.update, list(search))
        parent_map = self.source.get_parent_map(transitive_ids)
        order = topo_sort(parent_map.items())

        def size_of_content(content):
            return sum(len(line) for line in content.text())
        # Cache at most 10MB of parent texts
        parent_cache = lru_cache.LRUSizeCache(max_size=10*1024*1024,
                                              compute_size=size_of_content)
        # TODO: jam 20071116 It would be nice to have a streaming interface to
        #       get multiple texts from a source. The source could be smarter
        #       about how it handled intermediate stages.
        #       get_line_list() or make_mpdiffs() seem like a possibility, but
        #       at the moment they extract all full texts into memory, which
        #       causes us to store more than our 3x fulltext goal.
        #       Repository.iter_files_bytes() may be another possibility
        to_process = [version for version in order
                      if version not in self.target]
        total = len(to_process)
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for index, version in enumerate(to_process):
                pb.update('Converting versioned data', index, total)
                sha1, num_bytes, parent_text = self.target.add_lines(version,
                    self.source.get_parents_with_ghosts(version),
                    self.source.get_lines(version),
                    parent_texts=parent_cache)
                parent_cache[version] = parent_text
        finally:
            pb.finished()
        return total

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        # If the source and target are mismatched w.r.t. annotations vs
        # plain, the data needs to be converted accordingly.
        if self.source.factory.annotated == self.target.factory.annotated:
            converter = None
        elif self.source.factory.annotated:
            converter = self._anno_to_plain_converter
        else:
            # We're converting from a plain to an annotated knit. Copy them
            # across by full texts.
            return self._copy_texts(pb, msg, version_ids, ignore_missing)

        version_ids = self._get_source_version_ids(version_ids, ignore_missing)
        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)
            if None in version_ids:
                version_ids.remove(None)

            self.source_ancestry = set(self.source.get_ancestry(version_ids,
                topo_sorted=False))
            this_versions = set(self.target._index.get_versions())
            # XXX: For efficiency we should not look at the whole index,
            #      we only need to consider the referenced revisions - they
            #      must all be present, or the method must be full-text.
            #      TODO, RBC 20070919
            needed_versions = self.source_ancestry - this_versions

            if not needed_versions:
                return 0
            full_list = topo_sort(
                self.source.get_parent_map(self.source.versions()))

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # plan the join:
            copy_queue_records = []
            copy_queue = []
            copy_set = set()
            for version_id in version_list:
                options = self.source._index.get_options(version_id)
                parents = self.source._index.get_parents_with_ghosts(version_id)
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must:
                    # * already have it or
                    # * have it scheduled already
                    # otherwise we don't care
                    if not (self.target.has_version(parent) or
                            parent in copy_set or
                            not self.source.has_version(parent)):
                        raise AssertionError("problem joining parent %r "
                            "from %r to %r"
                            % (parent, self.source, self.target))
                index_memo = self.source._index.get_position(version_id)
                copy_queue_records.append((version_id, index_memo))
                copy_queue.append((version_id, options, parents))
                copy_set.add(version_id)

            # data suck the join:
            count = 0
            total = len(version_list)
            raw_datum = []
            raw_records = []
            for (version_id, raw_data, _), \
                (version_id2, options, parents) in \
                izip(self.source._data.read_records_iter_raw(copy_queue_records),
                     copy_queue):
                if not (version_id == version_id2):
                    raise AssertionError('logic error, inconsistent results')
                count = count + 1
                pb.update("Joining knit", count, total)
                if converter:
                    size, raw_data = converter(raw_data, version_id, options,
                        parents)
                else:
                    size = len(raw_data)
                raw_records.append((version_id, options, parents, size))
                raw_datum.append(raw_data)
            self.target._add_raw_records(raw_records, ''.join(raw_datum))
            return count
        finally:
            pb.finished()

    def _anno_to_plain_converter(self, raw_data, version_id, options,
                                 parents):
        """Convert annotated content to plain content."""
        data, digest = self.source._data._parse_record(version_id, raw_data)
        if 'fulltext' in options:
            content = self.source.factory.parse_fulltext(data, version_id)
            lines = self.target.factory.lower_fulltext(content)
        else:
            delta = self.source.factory.parse_line_delta(data, version_id,
                plain=True)
            lines = self.target.factory.lower_line_delta(delta)
        return self.target._data._record_to_data(version_id, digest, lines)


InterVersionedFile.register_optimiser(InterKnit)


class WeaveToKnit(InterVersionedFile):
    """Optimised code paths for weave to knit operations."""

    _matching_file_from_factory = bzrlib.weave.WeaveFile
    _matching_file_to_factory = staticmethod(make_file_knit)

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with weaves to knits."""
        try:
            return (isinstance(source, bzrlib.weave.Weave) and
                    isinstance(target, KnitVersionedFile))
        except AttributeError:
            return False

    def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
        """See InterVersionedFile.join."""
        version_ids = self._get_source_version_ids(version_ids, ignore_missing)

        if not version_ids:
            return 0

        pb = ui.ui_factory.nested_progress_bar()
        try:
            version_ids = list(version_ids)

            self.source_ancestry = set(self.source.get_ancestry(version_ids))
            this_versions = set(self.target._index.get_versions())
            needed_versions = self.source_ancestry - this_versions

            if not needed_versions:
                return 0
            full_list = topo_sort(
                self.source.get_parent_map(self.source.versions()))

            version_list = [i for i in full_list if (not self.target.has_version(i)
                            and i in needed_versions)]

            # do the join:
            count = 0
            total = len(version_list)
            parent_map = self.source.get_parent_map(version_list)
            for version_id in version_list:
                pb.update("Converting to knit", count, total)
                parents = parent_map[version_id]
                # check that it will be a consistent copy:
                for parent in parents:
                    # if source has the parent, we must already have it
                    if not self.target.has_version(parent):
                        raise AssertionError("%r does not have parent %r"
                            % (self.target, parent))
                self.target.add_lines(
                    version_id, parents, self.source.get_lines(version_id))
                count = count + 1
            return count
        finally:
            pb.finished()


InterVersionedFile.register_optimiser(WeaveToKnit)


# Deprecated, use PatienceSequenceMatcher instead
KnitSequenceMatcher = patiencediff.PatienceSequenceMatcher


def annotate_knit(knit, revision_id):
    """Annotate a knit with no cached annotations.

    This implementation is for knits with no cached annotations.
    It will work for knits with cached annotations, but this is not
    recommended.
    """
    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate(revision_id))


class _KnitAnnotator(object):
    """Build up the annotations for a text."""

    def __init__(self, knit):
        self._knit = knit

        # Content objects, differs from fulltexts because of how final newlines
        # are treated by knits. the content objects here will always have a
        # final newline
        self._fulltext_contents = {}

        # Annotated lines of specific revisions
        self._annotated_lines = {}

        # Track the raw data for nodes that we could not process yet.
        # This maps the revision_id of the base to a list of children that will
        # be annotated from it.
        self._pending_children = {}

        # Nodes which cannot be extracted
        self._ghosts = set()

        # Track how many children this node has, so we know if we need to keep
        # it
        self._annotate_children = {}
        self._compression_children = {}

        self._all_build_details = {}
        # The children => parent revision_id graph
        self._revision_id_graph = {}

        self._heads_provider = None

        self._nodes_to_keep_annotations = set()
        self._generations_until_keep = 100

    def set_generations_until_keep(self, value):
        """Set the number of generations before caching a node.

        Setting this to -1 will cache every merge node, setting this higher
        will cache fewer nodes.
        """
        self._generations_until_keep = value

    def _add_fulltext_content(self, revision_id, content_obj):
        self._fulltext_contents[revision_id] = content_obj
        # TODO: jam 20080305 It might be good to check the sha1digest here
        return content_obj.text()

    def _check_parents(self, child, nodes_to_annotate):
        """Check if all parents have been processed.

        :param child: A tuple of (rev_id, parents, raw_content)
        :param nodes_to_annotate: If child is ready, add it to
            nodes_to_annotate, otherwise put it back in self._pending_children
        """
        for parent_id in child[1]:
            if (parent_id not in self._annotated_lines):
                # This parent is not yet annotated; defer the child until
                # it is.
                self._pending_children.setdefault(parent_id,
                                                  []).append(child)
                break
        else:
            # This one is ready to be processed
            nodes_to_annotate.append(child)

    def _add_annotation(self, revision_id, fulltext, parent_ids,
                        left_matching_blocks=None):
        """Add an annotation entry.

        All parents should already have been annotated.
        :return: A list of children that now have their parents satisfied.
        """
        a = self._annotated_lines
        annotated_parent_lines = [a[p] for p in parent_ids]
        annotated_lines = list(annotate.reannotate(annotated_parent_lines,
                               fulltext, revision_id, left_matching_blocks,
                               heads_provider=self._get_heads_provider()))
        self._annotated_lines[revision_id] = annotated_lines
        for p in parent_ids:
            ann_children = self._annotate_children[p]
            ann_children.remove(revision_id)
            if (not ann_children
                and p not in self._nodes_to_keep_annotations):
                del self._annotated_lines[p]
                del self._all_build_details[p]
                if p in self._fulltext_contents:
                    del self._fulltext_contents[p]
        # Now that we've added this one, see if there are any pending
        # deltas to be done, certainly this parent is finished
        nodes_to_annotate = []
        for child in self._pending_children.pop(revision_id, []):
            self._check_parents(child, nodes_to_annotate)
        return nodes_to_annotate

    def _get_build_graph(self, revision_id):
        """Get the graphs for building texts and annotations.

        The data you need for creating a full text may be different than the
        data you need to annotate that text. (At a minimum, you need both
        parents to create an annotation, but only need 1 parent to generate the
        fulltext.)

        :return: A list of (revision_id, index_memo) records, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file.
        """
        if revision_id in self._annotated_lines:
            # Nothing to do
            return []
        pending = set([revision_id])
        records = []
        generation = 0
        kept_generation = 0
        while pending:
            # get all pending nodes
            generation += 1
            this_iteration = pending
            build_details = self._knit._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._knit._index._get_entries(this_iteration)
            pending = set()
            for rev_id, details in build_details.iteritems():
                (index_memo, compression_parent, parents,
                 record_details) = details
                self._revision_id_graph[rev_id] = parents
                records.append((rev_id, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update(p for p in parents
                                 if p not in self._all_build_details)
                if compression_parent:
                    self._compression_children.setdefault(compression_parent,
                        []).append(rev_id)
                if parents:
                    for parent in parents:
                        self._annotate_children.setdefault(parent,
                            set()).add(rev_id)
                    num_gens = generation - kept_generation
                    if ((num_gens >= self._generations_until_keep)
                        and len(parents) > 1):
                        kept_generation = generation
                        self._nodes_to_keep_annotations.add(rev_id)

            missing_versions = this_iteration.difference(build_details.keys())
            self._ghosts.update(missing_versions)
            for missing_version in missing_versions:
                # add a key, no parents
                self._revision_id_graph[missing_version] = ()
                pending.discard(missing_version) # don't look for it
        if self._ghosts.intersection(self._compression_children):
            raise KnitCorrupt(self._knit,
                "We cannot have nodes which have a ghost compression parent:\n"
                "ghosts: %r\n"
                "compression children: %r"
                % (self._ghosts, self._compression_children))
        # Cleanout anything that depends on a ghost so that we don't wait for
        # the ghost to show up.
        for node in self._ghosts:
            if node in self._annotate_children:
                # We won't be building this node
                del self._annotate_children[node]
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records

    def _annotate_records(self, records):
        """Build the annotations for the listed records."""
        # We iterate in the order read, rather than a strict order requested.
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        for (rev_id, record,
             digest) in self._knit._data.read_records_iter(records):
            if rev_id in self._annotated_lines:
                continue
            parent_ids = self._revision_id_graph[rev_id]
            parent_ids = [p for p in parent_ids if p not in self._ghosts]
            details = self._all_build_details[rev_id]
            (index_memo, compression_parent, parents,
             record_details) = details
            nodes_to_annotate = []
            # TODO: Remove the punning between compression parents, and
            #       parent_ids, we should be able to do this without assuming
            #       the fulltext parent is the compression parent.
            if len(parent_ids) == 0:
                # There are no parents for this node, so just add it
                # TODO: This probably needs to be decoupled
                fulltext_content, delta = self._knit.factory.parse_record(
                    rev_id, record, record_details, None)
                fulltext = self._add_fulltext_content(rev_id, fulltext_content)
                nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
                    parent_ids, left_matching_blocks=None))
            else:
                child = (rev_id, parent_ids, record)
                # Check if all the parents are present
                self._check_parents(child, nodes_to_annotate)
            while nodes_to_annotate:
                # Should we use a queue here instead of a stack?
                (rev_id, parent_ids, record) = nodes_to_annotate.pop()
                (index_memo, compression_parent, parents,
                 record_details) = self._all_build_details[rev_id]
                if compression_parent is not None:
                    comp_children = self._compression_children[compression_parent]
                    if rev_id not in comp_children:
                        raise AssertionError("%r not in compression children %r"
                            % (rev_id, comp_children))
                    # If there is only 1 child, it is safe to reuse this
                    # content.
                    reuse_content = (len(comp_children) == 1
                        and compression_parent not in
                            self._nodes_to_keep_annotations)
                    if reuse_content:
                        # Remove it from the cache since it will be changing.
                        parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
                        # Make sure to copy the fulltext since it might be
                        # modified.
                        parent_fulltext = list(parent_fulltext_content.text())
                    else:
                        parent_fulltext_content = self._fulltext_contents[compression_parent]
                        parent_fulltext = parent_fulltext_content.text()
                    comp_children.remove(rev_id)
                    fulltext_content, delta = self._knit.factory.parse_record(
                        rev_id, record, record_details,
                        parent_fulltext_content,
                        copy_base_content=(not reuse_content))
                    fulltext = self._add_fulltext_content(rev_id,
                                                          fulltext_content)
                    blocks = KnitContent.get_line_delta_blocks(delta,
                            parent_fulltext, fulltext)
                else:
                    fulltext_content = self._knit.factory.parse_fulltext(
                        record, rev_id)
                    fulltext = self._add_fulltext_content(rev_id,
                        fulltext_content)
                    blocks = None
                nodes_to_annotate.extend(
                    self._add_annotation(rev_id, fulltext, parent_ids,
                                         left_matching_blocks=blocks))

    def _get_heads_provider(self):
        """Create a heads provider for resolving ancestry issues."""
        if self._heads_provider is not None:
            return self._heads_provider
        parent_provider = _mod_graph.DictParentsProvider(
            self._revision_id_graph)
        graph_obj = _mod_graph.Graph(parent_provider)
        head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
        self._heads_provider = head_cache
        return head_cache

    def annotate(self, revision_id):
        """Return the annotated fulltext at the given revision.

        :param revision_id: The revision id for this file
        """
        records = self._get_build_graph(revision_id)
        if revision_id in self._ghosts:
            raise errors.RevisionNotPresent(revision_id, self._knit)
        self._annotate_records(records)
        return self._annotated_lines[revision_id]


try:
    from bzrlib._knit_load_data_c import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data