            out.extend(lines)
        return out

    def annotate(self, knit, key):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate_flat(key)
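
    # Example (illustrative, not from the original source): annotate_flat
    # returns a list of (key, line) pairs such as
    #   [(('rev-1',), 'hello\n'), (('rev-2',), 'world\n')]
    # with one entry per line of the requested text.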


def make_file_factory(annotated, mapper):
    """Create a factory for creating a file based KnitVersionedFiles.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param annotated: knit annotations are wanted.
    :param mapper: The mapper from keys to paths.
    """
    def factory(transport):
        index = _KndxIndex(transport, mapper, lambda:None, lambda:True, lambda:True)
        access = _KnitKeyAccess(transport, mapper)
        return KnitVersionedFiles(index, access, annotated=annotated)
    return factory
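

def _example_file_knit_usage(transport):
    # Hedged usage sketch, not part of the original module: exercise the
    # factory above against a writable transport (for example one returned by
    # bzrlib.transport.get_transport).  ConstantMapper, from
    # bzrlib.versionedfile, maps every key onto a single 'demo' file pair.
    from bzrlib.versionedfile import ConstantMapper
    files = make_file_factory(True, ConstantMapper('demo'))(transport)
    files.add_lines(('rev-1',), (), ['hello\n', 'world\n'])
    files.add_lines(('rev-2',), (('rev-1',),), ['hello\n', 'there\n'])
    return files.get_record_stream([('rev-2',)], 'unordered', True)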


def make_pack_factory(graph, delta, keylength):
    """Create a factory for creating a pack based VersionedFiles.

    This is only functional enough to run interface tests; it doesn't try to
    provide a full pack environment.

    :param graph: Store a graph.
    :param delta: Delta compress contents.
    :param keylength: How long should keys be.
    """
    def factory(transport):
        parents = graph or delta
        ref_length = 0
        if graph:
            ref_length += 1
        if delta:
            ref_length += 1
            max_delta_chain = 200
        else:
            max_delta_chain = 0
        graph_index = _mod_index.InMemoryGraphIndex(reference_lists=ref_length,
            key_elements=keylength)
        stream = transport.open_write_stream('newpack')
        writer = pack.ContainerWriter(stream.write)
        writer.begin()
        index = _KnitGraphIndex(graph_index, lambda:True, parents=parents,
            deltas=delta, add_callback=graph_index.add_nodes)
        access = _DirectPackAccess({})
        access.set_writer(writer, graph_index, (transport, 'newpack'))
        result = KnitVersionedFiles(index, access,
            max_delta_chain=max_delta_chain)
        result.stream = stream
        result.writer = writer
        return result
    return factory


def cleanup_pack_knit(versioned_files):
    versioned_files.stream.close()
    versioned_files.writer.end()
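

def _example_pack_knit_usage(transport):
    # Hedged usage sketch, not part of the original module: build a pack
    # backed KnitVersionedFiles with graph and delta support on a writable
    # transport, add two related texts, then release the pack writer.
    files = make_pack_factory(graph=True, delta=True, keylength=1)(transport)
    files.add_lines(('rev-1',), (), ['base text\n'])
    files.add_lines(('rev-2',), (('rev-1',),), ['base text\n', 'new line\n'])
    sha1s = files.get_sha1s([('rev-1',), ('rev-2',)])
    cleanup_pack_knit(files)
    return sha1s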


def _get_total_build_size(self, keys, positions):
    """Determine the total bytes to build these keys.

    (helper function because _KnitGraphIndex and _KndxIndex work the same, but
    don't inherit from a common base.)

    :param keys: Keys that we want to build
    :param positions: dict of {key: (info, index_memo, comp_parent)} (such
        as returned by _get_components_positions)
    :return: Number of bytes to build those keys
    """
    all_build_index_memos = {}
    build_keys = keys
    while build_keys:
        next_keys = set()
        for key in build_keys:
            # This is mostly for the 'stacked' case
            # Where we will be getting the data from a fallback
            if key not in positions:
                continue
            _, index_memo, compression_parent = positions[key]
            all_build_index_memos[key] = index_memo
            if compression_parent not in all_build_index_memos:
                next_keys.add(compression_parent)
        build_keys = next_keys
    return sum([index_memo[2] for index_memo
                in all_build_index_memos.itervalues()])
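

def _example_total_build_size():
    # Hedged sketch with hypothetical data, not part of the original module:
    # index memos are (index, offset, length) tuples, so entry [2] is the
    # stored byte length that _get_total_build_size sums over the whole build
    # chain (the requested key plus its compression parents).
    positions = {
        ('rev-2',): (('line-delta', False), (None, 0, 120), ('rev-1',)),
        ('rev-1',): (('fulltext', False), (None, 120, 400), None),
    }
    # Building rev-2 needs its 120 byte delta plus the 400 byte rev-1
    # fulltext, so this returns 520.
    return _get_total_build_size(None, [('rev-2',)], positions)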


class KnitVersionedFiles(VersionedFiles):
    """Storage for many versioned files using knit compression.

    Backend storage is managed by indices and data objects.

    :ivar _index: A _KnitGraphIndex or similar that can describe the
        parents, graph, compression and data location of entries in this
        KnitVersionedFiles.  Note that this is only the index for
        *this* vfs; if there are fallbacks they must be queried separately.
    """

    def __init__(self, index, data_access, max_delta_chain=200,
        annotated=False, reload_func=None):
        """Create a KnitVersionedFiles with index and data_access.

        :param index: The index for the knit data.
        :param data_access: The access object to store and retrieve knit
            records.
        :param max_delta_chain: The maximum number of deltas to permit during
            insertion. Set to 0 to prohibit the use of deltas.
        :param annotated: Set to True to cause annotations to be calculated and
            stored during insertion.
        :param reload_func: A function that can be called if we think we need
            to reload the pack listing and try again. See
            'bzrlib.repofmt.pack_repo.AggregateIndex' for the signature.
        """
    def annotate(self, knit, version_id):
        annotator = _KnitAnnotator(knit)
        return annotator.annotate(version_id)


def make_empty_knit(transport, relpath):
    """Construct an empty knit at the specified location."""
    k = make_file_knit(transport, relpath, 'w', KnitPlainFactory)


def make_file_knit(name, transport, file_mode=None, access_mode='w',
    factory=None, delta=True, create=False, create_parent_dir=False,
    delay_create=False, dir_mode=None, get_scope=None):
    """Factory to create a KnitVersionedFile for a .knit/.kndx file pair."""
    if factory is None:
        factory = KnitAnnotateFactory()
    if get_scope is None:
        get_scope = lambda:None
    index = _KnitIndex(transport, name + INDEX_SUFFIX,
        access_mode, create=create, file_mode=file_mode,
        create_parent_dir=create_parent_dir, delay_create=delay_create,
        dir_mode=dir_mode, get_scope=get_scope)
    access = _KnitAccess(transport, name + DATA_SUFFIX, file_mode,
        dir_mode, ((create and not len(index)) and delay_create),
        create_parent_dir)
    return KnitVersionedFile(name, transport, factory=factory,
        create=create, delay_create=delay_create, index=index,
        access_method=access)


def get_suffixes():
    """Return the suffixes used by file based knits."""
    return [DATA_SUFFIX, INDEX_SUFFIX]
make_file_knit.get_suffixes = get_suffixes
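

def _example_make_file_knit(transport):
    # Hedged usage sketch, not part of the original module: the older
    # version_id based API.  This creates 'demo.knit'/'demo.kndx' on the
    # given writable transport and stores two related texts.
    knit = make_file_knit('demo', transport, access_mode='w', create=True)
    knit.add_lines('rev-1', [], ['hello\n'])
    knit.add_lines('rev-2', ['rev-1'], ['hello\n', 'world\n'])
    return knit.get_lines('rev-2')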


class KnitVersionedFile(VersionedFile):
    """Weave-like structure with faster random access.

    A knit stores a number of texts and a summary of the relationships
    between them.  Texts are identified by a string version-id.  Texts
    are normally stored and retrieved as a series of lines, but can
    also be passed as single strings.

    Lines are stored with the trailing newline (if any) included, to
    avoid special cases for files with no final newline.  Lines are
    composed of 8-bit characters, not unicode.  The combination of
    these approaches should mean any 'binary' file can be safely
    stored and retrieved.
    """

    def __init__(self, relpath, transport, file_mode=None,
        factory=None, delta=True, create=False, create_parent_dir=False,
        delay_create=False, dir_mode=None, index=None, access_method=None):
        """Construct a knit at location specified by relpath.

        :param create: If not True, only open an existing knit.
        :param create_parent_dir: If True, create the parent directory if
            creating the file fails. (This is used for stores with
            hash-prefixes that may not exist yet)
        :param delay_create: The calling code is aware that the knit won't
            actually be created until the first data is stored.
        :param index: An index to use for the knit.
        """
        super(KnitVersionedFile, self).__init__()
        self.transport = transport
        self.filename = relpath
        self.factory = factory or KnitAnnotateFactory()
        self.delta = delta
        self._max_delta_chain = 200
        if None in (access_method, index):
            raise ValueError("No default access_method or index any more")
        self._index = index
        self._access = data_access
        self._max_delta_chain = max_delta_chain
        if annotated:
            self._factory = KnitAnnotateFactory()
        else:
            self._factory = KnitPlainFactory()
        self._fallback_vfs = []
        self._reload_func = reload_func
        _access = access_method
        if create and not len(self) and not delay_create:
            self._data = _KnitData(_access)

    def __repr__(self):
        return "%s(%r, %r)" % (
            self.__class__.__name__,
            self._index,
            self._access)

def add_fallback_versioned_files(self, a_versioned_files):
895
"""Add a source of texts for texts not present in this knit.
897
:param a_versioned_files: A VersionedFiles object.
899
self._fallback_vfs.append(a_versioned_files)
901
    def add_lines(self, key, parents, lines, parent_texts=None,
        left_matching_blocks=None, nostore_sha=None, random_id=False,
        check_content=True):
        """See VersionedFiles.add_lines()."""
        self._index._check_write_ok()
        self._check_add(key, lines, random_id, check_content)
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        line_bytes = ''.join(lines)
        return self._add(key, lines, parents,
            parent_texts, left_matching_blocks, nostore_sha, random_id,
            line_bytes=line_bytes)
917
    def _add_text(self, key, parents, text, nostore_sha=None, random_id=False):
        """See VersionedFiles._add_text()."""
        self._index._check_write_ok()
        self._check_add(key, None, random_id, check_content=False)
        if text.__class__ is not str:
            raise errors.BzrBadParameterUnicode("text")
        if parents is None:
            # The caller might pass None if there is no graph data, but kndx
            # indexes can't directly store that, so we give them
            # an empty tuple instead.
            parents = ()
        return self._add(key, None, parents,
            None, None, nostore_sha, random_id,
            line_bytes=text)
932
    def _add(self, key, lines, parents, parent_texts,
        left_matching_blocks, nostore_sha, random_id,
        line_bytes):
        """Add a set of lines on top of version specified by parents.

        Any versions not present will be converted into ghosts.

        :param lines: A list of strings where each one is a single line (has a
            single newline at the end of the string).  This is now optional
            (callers can pass None).  It is left in its location for backwards
            compatibility.  ''.join(lines) must equal line_bytes when both are
            supplied.
        :param line_bytes: A single string containing the content

        We pass both lines and line_bytes because different routes bring the
        values to this function.  And for memory efficiency, we don't want to
        have to split/join on-demand.
        """
949
        # first thing, if the content is something we don't need to store, find
        # that out.
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent
956
if parent_texts is None:
958
# Do a single query to ascertain parent presence; we only compress
959
# against parents in the same kvf.
960
present_parent_map = self._index.get_parent_map(parents)
961
for parent in parents:
962
if parent in present_parent_map:
963
present_parents.append(parent)
965
        # Currently we can only compress against the left most present parent.
        if (len(present_parents) == 0 or
            present_parents[0] != parents[0]):
            delta = False
        else:
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas.  This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents[0])
975
text_length = len(line_bytes)
978
# Note: line_bytes is not modified to add a newline, that is tracked
979
# via the no_eol flag. 'lines' *is* modified, because that is the
980
# general values needed by the Content code.
981
if line_bytes and line_bytes[-1] != '\n':
982
options.append('no-eol')
984
# Copy the existing list, or create a new one
986
lines = osutils.split_lines(line_bytes)
989
# Replace the last line with one that ends in a final newline
990
lines[-1] = lines[-1] + '\n'
992
lines = osutils.split_lines(line_bytes)
994
for element in key[:-1]:
995
if type(element) is not str:
996
raise TypeError("key contains non-strings: %r" % (key,))
998
key = key[:-1] + ('sha1:' + digest,)
999
elif type(key[-1]) is not str:
1000
raise TypeError("key contains non-strings: %r" % (key,))
1001
# Knit hunks are still last-element only
1002
version_id = key[-1]
1003
content = self._factory.make(lines, version_id)
1005
# Hint to the content object that its text() call should strip the
1007
content._should_strip_eol = True
1008
if delta or (self._factory.annotated and len(present_parents) > 0):
1009
# Merge annotations from parent texts if needed.
1010
delta_hunks = self._merge_annotations(content, present_parents,
1011
parent_texts, delta, self._factory.annotated,
1012
left_matching_blocks)
1015
options.append('line-delta')
1016
store_lines = self._factory.lower_line_delta(delta_hunks)
1017
size, bytes = self._record_to_data(key, digest,
1020
options.append('fulltext')
1021
# isinstance is slower and we have no hierarchy.
1022
if self._factory.__class__ is KnitPlainFactory:
1023
# Use the already joined bytes saving iteration time in
1025
dense_lines = [line_bytes]
1027
dense_lines.append('\n')
1028
size, bytes = self._record_to_data(key, digest,
1031
# get mixed annotation + content and feed it into the
1033
store_lines = self._factory.lower_fulltext(content)
1034
size, bytes = self._record_to_data(key, digest,
1037
access_memo = self._access.add_raw_records([(key, size)], bytes)[0]
1038
self._index.add_records(
1039
((key, options, access_memo, parents),),
1040
random_id=random_id)
1041
return digest, text_length, content
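
    # Example (illustrative, not from the original source): callers of
    # add_lines() receive this triple, typically used as
    #   sha1, text_length, _ = files.add_lines(('rev-1',), (), ['hello\n'])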
1043
def annotate(self, key):
1044
"""See VersionedFiles.annotate."""
1045
return self._factory.annotate(self, key)
1047
def get_annotator(self):
1048
return _KnitAnnotator(self)
1050
def check(self, progress_bar=None, keys=None):
1051
"""See VersionedFiles.check()."""
1053
return self._logical_check()
1055
# At the moment, check does not extra work over get_record_stream
1056
return self.get_record_stream(keys, 'unordered', True)
1058
def _logical_check(self):
1059
# This doesn't actually test extraction of everything, but that will
1060
# impact 'bzr check' substantially, and needs to be integrated with
1061
# care. However, it does check for the obvious problem of a delta with
1063
keys = self._index.keys()
1064
parent_map = self.get_parent_map(keys)
1066
if self._index.get_method(key) != 'fulltext':
1067
compression_parent = parent_map[key][0]
1068
if compression_parent not in parent_map:
1069
raise errors.KnitCorrupt(self,
1070
"Missing basis parent %s for %s" % (
1071
compression_parent, key))
1072
for fallback_vfs in self._fallback_vfs:
1073
fallback_vfs.check()
1075
def _check_add(self, key, lines, random_id, check_content):
1076
"""check that version_id and lines are safe to add."""
1077
version_id = key[-1]
1078
if version_id is not None:
1079
if contains_whitespace(version_id):
1080
raise InvalidRevisionId(version_id, self)
1081
self.check_not_reserved_id(version_id)
1082
# TODO: If random_id==False and the key is already present, we should
1083
# probably check that the existing content is identical to what is
1084
# being inserted, and otherwise raise an exception. This would make
1085
# the bundle code simpler.
1087
self._check_lines_not_unicode(lines)
1088
self._check_lines_are_lines(lines)
1090
def _check_header(self, key, line):
1091
rec = self._split_header(line)
1092
self._check_header_version(rec, key[-1])
1095
def _check_header_version(self, rec, version_id):
1096
"""Checks the header version on original format knit records.
1098
These have the last component of the key embedded in the record.
1100
if rec[1] != version_id:
1101
raise KnitCorrupt(self,
1102
'unexpected version, wanted %r, got %r' % (version_id, rec[1]))
1104
def _check_should_delta(self, parent):
709
return '%s(%s)' % (self.__class__.__name__,
710
self.transport.abspath(self.filename))
712
def _check_should_delta(self, first_parents):
1105
713
"""Iterate back through the parent listing, looking for a fulltext.
1107
715
This is used when we want to decide whether to add a delta or a new
1116
724
fulltext_size = None
725
delta_parents = first_parents
1117
726
for count in xrange(self._max_delta_chain):
1119
# Note that this only looks in the index of this particular
1120
# KnitVersionedFiles, not in the fallbacks. This ensures that
1121
# we won't store a delta spanning physical repository
1123
build_details = self._index.get_build_details([parent])
1124
parent_details = build_details[parent]
1125
except (RevisionNotPresent, KeyError), e:
1126
# Some basis is not locally present: always fulltext
1128
index_memo, compression_parent, _, _ = parent_details
1129
_, _, size = index_memo
1130
if compression_parent is None:
727
parent = delta_parents[0]
728
method = self._index.get_method(parent)
729
index, pos, size = self._index.get_position(parent)
730
if method == 'fulltext':
1131
731
fulltext_size = size
1133
733
delta_size += size
1134
# We don't explicitly check for presence because this is in an
1135
# inner loop, and if it's missing it'll fail anyhow.
1136
parent = compression_parent
734
delta_parents = self._index.get_parent_map([parent])[parent]
1138
736
        # We couldn't find a fulltext, so we must create a new one
        # Simple heuristic - if the total I/O would be greater as a delta than
        # the originally installed fulltext, we create a new fulltext.
        return fulltext_size > delta_size
1144
def _build_details_to_components(self, build_details):
1145
"""Convert a build_details tuple to a position tuple."""
1146
# record_details, access_memo, compression_parent
1147
return build_details[3], build_details[0], build_details[1]
1149
def _get_components_positions(self, keys, allow_missing=False):
1150
"""Produce a map of position data for the components of keys.
1152
This data is intended to be used for retrieving the knit records.
1154
        A dict of key to (record_details, index_memo, next, parents) is
        returned.
        method is the way referenced data should be applied.
        index_memo is the handle to pass to the data access to actually get the
            data
        next is the build-parent of the version, or None for fulltexts.
        parents is the version_ids of the parents of this version

        :param allow_missing: If True do not raise an error on a missing
            component, just ignore it.
        """
        component_data = {}
        pending_components = keys
1167
while pending_components:
1168
build_details = self._index.get_build_details(pending_components)
1169
current_components = set(pending_components)
1170
pending_components = set()
1171
for key, details in build_details.iteritems():
1172
(index_memo, compression_parent, parents,
1173
record_details) = details
1174
method = record_details[0]
1175
if compression_parent is not None:
1176
pending_components.add(compression_parent)
1177
component_data[key] = self._build_details_to_components(details)
1178
missing = current_components.difference(build_details)
1179
if missing and not allow_missing:
1180
raise errors.RevisionNotPresent(missing.pop(), self)
1181
return component_data
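
    # Example (illustrative, not from the original source): for a 'rev-2'
    # stored as a line-delta against the fulltext 'rev-1', the returned map is
    # roughly
    #   {('rev-2',): (record_details, index_memo, ('rev-1',)),
    #    ('rev-1',): (record_details, index_memo, None)}
    # where the third element is the compression parent to apply next.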
1183
def _get_content(self, key, parent_texts={}):
1184
"""Returns a content object that makes up the specified
1186
cached_version = parent_texts.get(key, None)
1187
if cached_version is not None:
1188
# Ensure the cache dict is valid.
1189
if not self.get_parent_map([key]):
1190
raise RevisionNotPresent(key, self)
1191
return cached_version
1192
generator = _VFContentMapGenerator(self, [key])
1193
return generator._get_content(key)
1195
def get_known_graph_ancestry(self, keys):
1196
"""Get a KnownGraph instance with the ancestry of keys."""
1197
parent_map, missing_keys = self._index.find_ancestry(keys)
1198
for fallback in self._fallback_vfs:
1199
if not missing_keys:
1201
(f_parent_map, f_missing_keys) = fallback._index.find_ancestry(
1203
parent_map.update(f_parent_map)
1204
missing_keys = f_missing_keys
1205
kg = _mod_graph.KnownGraph(parent_map)
1208
def get_parent_map(self, keys):
1209
"""Get a map of the graph parents of keys.
1211
:param keys: The keys to look up parents for.
1212
:return: A mapping from keys to parents. Absent keys are absent from
1215
return self._get_parent_map_with_sources(keys)[0]
1217
def _get_parent_map_with_sources(self, keys):
1218
"""Get a map of the parents of keys.
1220
:param keys: The keys to look up parents for.
1221
:return: A tuple. The first element is a mapping from keys to parents.
1222
Absent keys are absent from the mapping. The second element is a
1223
list with the locations each key was found in. The first element
1224
is the in-this-knit parents, the second the first fallback source,
1228
sources = [self._index] + self._fallback_vfs
1231
for source in sources:
1234
new_result = source.get_parent_map(missing)
1235
source_results.append(new_result)
1236
result.update(new_result)
1237
missing.difference_update(set(new_result))
1238
return result, source_results
1240
def _get_record_map(self, keys, allow_missing=False):
        """Produce a dictionary of knit records.

        :return: {key:(record, record_details, digest, next)}
            record
                data returned from read_records (a KnitContent object)
            record_details
                opaque information to pass to parse_record
            digest
                SHA1 digest of the full text after all steps are done
            next
                build-parent of the version, i.e. the leftmost ancestor.
                Will be None if the record is not a delta.
        :param keys: The keys to build a map for
        :param allow_missing: If some records are missing, rather than
            error, just return the data that could be generated.
        """
1257
raw_map = self._get_record_map_unparsed(keys,
1258
allow_missing=allow_missing)
1259
return self._raw_map_to_record_map(raw_map)
1261
def _raw_map_to_record_map(self, raw_map):
1262
"""Parse the contents of _get_record_map_unparsed.
1264
:return: see _get_record_map.
1268
data, record_details, next = raw_map[key]
1269
content, digest = self._parse_record(key[-1], data)
1270
result[key] = content, record_details, digest, next
1273
def _get_record_map_unparsed(self, keys, allow_missing=False):
1274
"""Get the raw data for reconstructing keys without parsing it.
1276
:return: A dict suitable for parsing via _raw_map_to_record_map.
1277
key-> raw_bytes, (method, noeol), compression_parent
1279
# This retries the whole request if anything fails. Potentially we
1280
# could be a bit more selective. We could track the keys whose records
1281
# we have successfully found, and then only request the new records
1282
# from there. However, _get_components_positions grabs the whole build
1283
# chain, which means we'll likely try to grab the same records again
1284
# anyway. Also, can the build chains change as part of a pack
1285
# operation? We wouldn't want to end up with a broken chain.
1288
position_map = self._get_components_positions(keys,
1289
allow_missing=allow_missing)
1290
# key = component_id, r = record_details, i_m = index_memo,
1292
records = [(key, i_m) for key, (r, i_m, n)
1293
in position_map.iteritems()]
1294
# Sort by the index memo, so that we request records from the
1295
# same pack file together, and in forward-sorted order
1296
records.sort(key=operator.itemgetter(1))
1298
for key, data in self._read_records_iter_unchecked(records):
1299
(record_details, index_memo, next) = position_map[key]
1300
raw_record_map[key] = data, record_details, next
1301
return raw_record_map
1302
except errors.RetryWithNewPacks, e:
1303
self._access.reload_or_raise(e)
1306
    @classmethod
    def _split_by_prefix(cls, keys):
        """For the given keys, split them up based on their prefix.
1309
To keep memory pressure somewhat under control, split the
1310
requests back into per-file-id requests, otherwise "bzr co"
1311
extracts the full tree into memory before writing it to disk.
1312
This should be revisited if _get_content_maps() can ever cross
1315
The keys for a given file_id are kept in the same relative order.
1316
Ordering between file_ids is not, though prefix_order will return the
1317
order that the key was first seen.
1319
:param keys: An iterable of key tuples
1320
:return: (split_map, prefix_order)
1321
split_map A dictionary mapping prefix => keys
1322
prefix_order The order that we saw the various prefixes
1324
split_by_prefix = {}
1332
if prefix in split_by_prefix:
1333
split_by_prefix[prefix].append(key)
1335
split_by_prefix[prefix] = [key]
1336
prefix_order.append(prefix)
1337
return split_by_prefix, prefix_order
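
    # Example (illustrative, not from the original source, assuming the prefix
    # is the leading file-id element of a multi-element key): for keys
    # [('f1', 'r1'), ('f2', 'r1'), ('f1', 'r2')] the split map is
    #   {'f1': [('f1', 'r1'), ('f1', 'r2')], 'f2': [('f2', 'r1')]}
    # and the prefix order is ['f1', 'f2'].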
1339
def _group_keys_for_io(self, keys, non_local_keys, positions,
1340
_min_buffer_size=_STREAM_MIN_BUFFER_SIZE):
1341
"""For the given keys, group them into 'best-sized' requests.
1343
The idea is to avoid making 1 request per file, but to never try to
1344
unpack an entire 1.5GB source tree in a single pass. Also when
1345
possible, we should try to group requests to the same pack file
1348
:return: list of (keys, non_local) tuples that indicate what keys
1349
should be fetched next.
1351
# TODO: Ideally we would group on 2 factors. We want to extract texts
1352
# from the same pack file together, and we want to extract all
1353
# the texts for a given build-chain together. Ultimately it
1354
# probably needs a better global view.
1355
total_keys = len(keys)
1356
prefix_split_keys, prefix_order = self._split_by_prefix(keys)
1357
prefix_split_non_local_keys, _ = self._split_by_prefix(non_local_keys)
1359
cur_non_local = set()
1363
for prefix in prefix_order:
1364
keys = prefix_split_keys[prefix]
1365
non_local = prefix_split_non_local_keys.get(prefix, [])
1367
this_size = self._index._get_total_build_size(keys, positions)
1368
cur_size += this_size
1369
cur_keys.extend(keys)
1370
cur_non_local.update(non_local)
1371
if cur_size > _min_buffer_size:
1372
result.append((cur_keys, cur_non_local))
1373
sizes.append(cur_size)
1375
cur_non_local = set()
1378
result.append((cur_keys, cur_non_local))
1379
sizes.append(cur_size)
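
    # Example (illustrative, not from the original source): many small texts
    # for one file-id come back as a few batches such as
    #   [(keys_batch_1, non_local_1), (keys_batch_2, non_local_2)]
    # where every batch except possibly the last totals at least
    # _min_buffer_size bytes of raw record data.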
1382
def get_record_stream(self, keys, ordering, include_delta_closure):
1383
"""Get a stream of records for keys.
1385
:param keys: The keys to include.
741
def _check_write_ok(self):
742
return self._index._check_write_ok()
744
def _add_raw_records(self, records, data):
745
"""Add all the records 'records' with data pre-joined in 'data'.
747
:param records: A list of tuples(version_id, options, parents, size).
748
:param data: The data for the records. When it is written, the records
749
are adjusted to have pos pointing into data by the sum of
750
the preceding records sizes.
753
raw_record_sizes = [record[3] for record in records]
754
positions = self._data.add_raw_records(raw_record_sizes, data)
756
for (version_id, options, parents, _), access_memo in zip(
758
index_entries.append((version_id, options, access_memo, parents))
759
self._index.add_versions(index_entries)
761
def copy_to(self, name, transport):
762
"""See VersionedFile.copy_to()."""
763
# copy the current index to a temp index to avoid racing with local
765
transport.put_file_non_atomic(name + INDEX_SUFFIX + '.tmp',
766
self.transport.get(self._index._filename))
768
f = self._data._open_file()
770
transport.put_file(name + DATA_SUFFIX, f)
773
# move the copied index into place
774
transport.move(name + INDEX_SUFFIX + '.tmp', name + INDEX_SUFFIX)
776
def get_data_stream(self, required_versions):
777
"""Get a data stream for the specified versions.
779
Versions may be returned in any order, not necessarily the order
780
specified. They are returned in a partial order by compression
781
parent, so that the deltas can be applied as the data stream is
782
inserted; however note that compression parents will not be sent
783
unless they were specifically requested, as the client may already
786
:param required_versions: The exact set of versions to be extracted.
787
Unlike some other knit methods, this is not used to generate a
788
transitive closure, rather it is used precisely as given.
790
:returns: format_signature, list of (version, options, length, parents),
793
required_version_set = frozenset(required_versions)
795
# list of revisions that can just be sent without waiting for their
798
# map from revision to the children based on it
800
# first, read all relevant index data, enough to sort into the right
802
for version_id in required_versions:
803
options = self._index.get_options(version_id)
804
parents = self._index.get_parents_with_ghosts(version_id)
805
index_memo = self._index.get_position(version_id)
806
version_index[version_id] = (index_memo, options, parents)
807
if ('line-delta' in options
808
and parents[0] in required_version_set):
809
# must wait until the parent has been sent
810
deferred.setdefault(parents[0], []). \
813
# either a fulltext, or a delta whose parent the client did
814
# not ask for and presumably already has
815
ready_to_send.append(version_id)
816
# build a list of results to return, plus instructions for data to
818
copy_queue_records = []
819
temp_version_list = []
821
# XXX: pushing and popping lists may be a bit inefficient
822
version_id = ready_to_send.pop(0)
823
(index_memo, options, parents) = version_index[version_id]
824
copy_queue_records.append((version_id, index_memo))
825
none, data_pos, data_size = index_memo
826
temp_version_list.append((version_id, options, data_size,
828
if version_id in deferred:
829
# now we can send all the children of this revision - we could
830
# put them in anywhere, but we hope that sending them soon
831
# after the fulltext will give good locality in the receiver
832
ready_to_send[:0] = deferred.pop(version_id)
833
assert len(deferred) == 0, \
834
"Still have compressed child versions waiting to be sent"
835
# XXX: The stream format is such that we cannot stream it - we have to
836
# know the length of all the data a-priori.
838
result_version_list = []
839
for (version_id, raw_data, _), \
840
(version_id2, options, _, parents) in \
841
izip(self._data.read_records_iter_raw(copy_queue_records),
843
assert version_id == version_id2, \
844
'logic error, inconsistent results'
845
raw_datum.append(raw_data)
846
result_version_list.append(
847
(version_id, options, len(raw_data), parents))
848
# provide a callback to get data incrementally.
849
pseudo_file = StringIO(''.join(raw_datum))
852
return pseudo_file.read()
854
return pseudo_file.read(length)
855
return (self.get_format_signature(), result_version_list, read)
857
def get_record_stream(self, versions, ordering, include_delta_closure):
858
"""Get a stream of records for versions.
860
:param versions: The versions to include. Each version is a tuple
1386
862
:param ordering: Either 'unordered' or 'topological'. A topologically
1387
863
sorted stream has compression parents strictly before their
1391
867
:return: An iterator of ContentFactory objects, each of which is only
1392
868
valid until the iterator is advanced.
1394
# keys might be a generator
1398
if not self._index.has_graph:
1399
# Cannot sort when no graph has been stored.
1400
ordering = 'unordered'
1402
remaining_keys = keys
1405
keys = set(remaining_keys)
1406
for content_factory in self._get_remaining_record_stream(keys,
1407
ordering, include_delta_closure):
1408
remaining_keys.discard(content_factory.key)
1409
yield content_factory
1411
except errors.RetryWithNewPacks, e:
1412
self._access.reload_or_raise(e)
1414
def _get_remaining_record_stream(self, keys, ordering,
1415
include_delta_closure):
1416
"""This function is the 'retry' portion for get_record_stream."""
1417
870
if include_delta_closure:
1418
positions = self._get_components_positions(keys, allow_missing=True)
871
# Nb: what we should do is plan the data to stream to allow
872
# reconstruction of all the texts without excessive buffering,
873
# including re-sending common bases as needed. This makes the most
874
# sense when we start serialising these streams though, so for now
875
# we just fallback to individual text construction behind the
876
# abstraction barrier.
1420
build_details = self._index.get_build_details(keys)
1422
# (record_details, access_memo, compression_parent_key)
1423
positions = dict((key, self._build_details_to_components(details))
1424
for key, details in build_details.iteritems())
1425
absent_keys = keys.difference(set(positions))
1426
# There may be more absent keys : if we're missing the basis component
1427
# and are trying to include the delta closure.
1428
# XXX: We should not ever need to examine remote sources because we do
1429
# not permit deltas across versioned files boundaries.
1430
if include_delta_closure:
1431
needed_from_fallback = set()
1432
# Build up reconstructable_keys dict. key:True in this dict means
1433
# the key can be reconstructed.
1434
reconstructable_keys = {}
1438
chain = [key, positions[key][2]]
1440
needed_from_fallback.add(key)
1443
while chain[-1] is not None:
1444
if chain[-1] in reconstructable_keys:
1445
result = reconstructable_keys[chain[-1]]
1449
chain.append(positions[chain[-1]][2])
1451
# missing basis component
1452
needed_from_fallback.add(chain[-1])
1455
for chain_key in chain[:-1]:
1456
reconstructable_keys[chain_key] = result
1458
needed_from_fallback.add(key)
1459
880
# Double index lookups here : need a unified api ?
1460
global_map, parent_maps = self._get_parent_map_with_sources(keys)
1461
if ordering in ('topological', 'groupcompress'):
1462
if ordering == 'topological':
1463
# Global topological sort
1464
present_keys = tsort.topo_sort(global_map)
1466
present_keys = sort_groupcompress(global_map)
1467
# Now group by source:
1469
current_source = None
1470
for key in present_keys:
1471
for parent_map in parent_maps:
1472
if key in parent_map:
1473
key_source = parent_map
1475
if current_source is not key_source:
1476
source_keys.append((key_source, []))
1477
current_source = key_source
1478
source_keys[-1][1].append(key)
1480
if ordering != 'unordered':
1481
raise AssertionError('valid values for ordering are:'
1482
' "unordered", "groupcompress" or "topological" not: %r'
1484
# Just group by source; remote sources first.
1487
for parent_map in reversed(parent_maps):
1488
source_keys.append((parent_map, []))
1489
for key in parent_map:
1490
present_keys.append(key)
1491
source_keys[-1][1].append(key)
1492
# We have been requested to return these records in an order that
1493
# suits us. So we ask the index to give us an optimally sorted
1495
for source, sub_keys in source_keys:
1496
if source is parent_maps[0]:
1497
# Only sort the keys for this VF
1498
self._index._sort_keys_by_io(sub_keys, positions)
1499
absent_keys = keys - set(global_map)
1500
for key in absent_keys:
1501
yield AbsentContentFactory(key)
1502
# restrict our view to the keys we can answer.
1503
# XXX: Memory: TODO: batch data here to cap buffered data at (say) 1MB.
1504
# XXX: At that point we need to consider the impact of double reads by
1505
# utilising components multiple times.
1506
if include_delta_closure:
1507
# XXX: get_content_maps performs its own index queries; allow state
1509
non_local_keys = needed_from_fallback - absent_keys
1510
for keys, non_local_keys in self._group_keys_for_io(present_keys,
1513
generator = _VFContentMapGenerator(self, keys, non_local_keys,
1516
for record in generator.get_record_stream():
1519
for source, keys in source_keys:
1520
if source is parent_maps[0]:
1521
# this KnitVersionedFiles
1522
records = [(key, positions[key][1]) for key in keys]
1523
for key, raw_data in self._read_records_iter_unchecked(records):
1524
(record_details, index_memo, _) = positions[key]
1525
yield KnitContentFactory(key, global_map[key],
1526
record_details, None, raw_data, self._factory.annotated, None)
1528
vf = self._fallback_vfs[parent_maps.index(source) - 1]
1529
for record in vf.get_record_stream(keys, ordering,
1530
include_delta_closure):
1533
def get_sha1s(self, keys):
1534
"""See VersionedFiles.get_sha1s()."""
1536
record_map = self._get_record_map(missing, allow_missing=True)
1538
for key, details in record_map.iteritems():
1539
if key not in missing:
1541
# record entry 2 is the 'digest'.
1542
result[key] = details[2]
1543
missing.difference_update(set(result))
1544
for source in self._fallback_vfs:
1547
new_result = source.get_sha1s(missing)
1548
result.update(new_result)
1549
missing.difference_update(set(new_result))
881
parent_map = self.get_parent_map(versions)
882
absent_versions = set(versions) - set(parent_map)
883
if ordering == 'topological':
884
present_versions = topo_sort(parent_map)
886
# List comprehension to keep the requested order (as that seems
887
# marginally useful, at least until we start doing IO optimising
889
present_versions = [version for version in versions if version in
891
position_map = self._get_components_positions(present_versions)
892
# c = component_id, r = record_details, i_m = index_memo, n = next
893
records = [(version, position_map[version][1]) for version in
896
for version in absent_versions:
897
yield AbsentContentFactory((version,))
898
for version, raw_data, sha1 in \
899
self._data.read_records_iter_raw(records):
900
(record_details, index_memo, _) = position_map[version]
901
yield KnitContentFactory(version, parent_map[version],
902
record_details, sha1, raw_data, self.factory.annotated, knit)
904
def _extract_blocks(self, version_id, source, target):
905
if self._index.get_method(version_id) != 'line-delta':
907
parent, sha1, noeol, delta = self.get_delta(version_id)
908
return KnitContent.get_line_delta_blocks(delta, source, target)
910
def get_delta(self, version_id):
911
"""Get a delta for constructing version from some other version."""
912
self.check_not_reserved_id(version_id)
913
parents = self.get_parent_map([version_id])[version_id]
918
index_memo = self._index.get_position(version_id)
919
data, sha1 = self._data.read_records(((version_id, index_memo),))[version_id]
920
noeol = 'no-eol' in self._index.get_options(version_id)
921
if 'fulltext' == self._index.get_method(version_id):
922
new_content = self.factory.parse_fulltext(data, version_id)
923
if parent is not None:
924
reference_content = self._get_content(parent)
925
old_texts = reference_content.text()
928
new_texts = new_content.text()
929
delta_seq = patiencediff.PatienceSequenceMatcher(None, old_texts,
931
return parent, sha1, noeol, self._make_line_delta(delta_seq, new_content)
933
delta = self.factory.parse_line_delta(data, version_id)
934
return parent, sha1, noeol, delta
936
def get_format_signature(self):
937
"""See VersionedFile.get_format_signature()."""
938
if self.factory.annotated:
939
annotated_part = "annotated"
941
annotated_part = "plain"
942
return "knit-%s" % (annotated_part,)
944
def get_sha1s(self, version_ids):
945
"""See VersionedFile.get_sha1s()."""
946
record_map = self._get_record_map(version_ids)
947
# record entry 2 is the 'digest'.
948
return [record_map[v][2] for v in version_ids]
950
def insert_data_stream(self, (format, data_list, reader_callable)):
951
"""Insert knit records from a data stream into this knit.
953
If a version in the stream is already present in this knit, it will not
954
be inserted a second time. It will be checked for consistency with the
955
stored version however, and may cause a KnitCorrupt error to be raised
956
if the data in the stream disagrees with the already stored data.
958
:seealso: get_data_stream
960
if format != self.get_format_signature():
961
if 'knit' in debug.debug_flags:
963
'incompatible format signature inserting to %r', self)
964
source = self._knit_from_datastream(
965
(format, data_list, reader_callable))
966
stream = source.get_record_stream(source.versions(), 'unordered', False)
967
self.insert_record_stream(stream)
970
for version_id, options, length, parents in data_list:
971
if self.has_version(version_id):
972
# First check: the list of parents.
973
my_parents = self.get_parents_with_ghosts(version_id)
974
if tuple(my_parents) != tuple(parents):
975
# XXX: KnitCorrupt is not quite the right exception here.
978
'parents list %r from data stream does not match '
979
'already recorded parents %r for %s'
980
% (parents, my_parents, version_id))
982
# Also check the SHA-1 of the fulltext this content will
984
raw_data = reader_callable(length)
985
my_fulltext_sha1 = self.get_sha1s([version_id])[0]
986
df, rec = self._data._parse_record_header(version_id, raw_data)
987
stream_fulltext_sha1 = rec[3]
988
if my_fulltext_sha1 != stream_fulltext_sha1:
989
# Actually, we don't know if it's this knit that's corrupt,
990
# or the data stream we're trying to insert.
992
self.filename, 'sha-1 does not match %s' % version_id)
994
if 'line-delta' in options:
995
# Make sure that this knit record is actually useful: a
996
# line-delta is no use unless we have its parent.
997
# Fetching from a broken repository with this problem
998
# shouldn't break the target repository.
1000
# See https://bugs.launchpad.net/bzr/+bug/164443
1001
if not self._index.has_version(parents[0]):
1004
'line-delta from stream '
1007
'missing parent %s\n'
1008
'Try running "bzr check" '
1009
'on the source repository, and "bzr reconcile" '
1011
(version_id, parents[0]))
1013
# We received a line-delta record for a non-delta knit.
1014
# Convert it to a fulltext.
1015
gzip_bytes = reader_callable(length)
1016
lines, sha1 = self._data._parse_record(
1017
version_id, gzip_bytes)
1018
delta = self.factory.parse_line_delta(lines,
1020
content = self.factory.make(
1021
self.get_lines(parents[0]), parents[0])
1022
content.apply_delta(delta, version_id)
1023
digest, len, content = self.add_lines(
1024
version_id, parents, content.text())
1026
raise errors.VersionedFileInvalidChecksum(version_id)
1029
self._add_raw_records(
1030
[(version_id, options, parents, length)],
1031
reader_callable(length))
1033
def _knit_from_datastream(self, (format, data_list, reader_callable)):
1034
"""Create a knit object from a data stream.
1036
This method exists to allow conversion of data streams that do not
1037
match the signature of this knit. Generally it will be slower and use
1038
more memory to use this method to insert data, but it will work.
1040
:seealso: get_data_stream for details on datastreams.
1041
:return: A knit versioned file which can be used to join the datastream
1044
if format == "knit-plain":
1045
factory = KnitPlainFactory()
1046
elif format == "knit-annotated":
1047
factory = KnitAnnotateFactory()
1049
raise errors.KnitDataStreamUnknown(format)
1050
index = _StreamIndex(data_list, self._index)
1051
access = _StreamAccess(reader_callable, index, self, factory)
1052
return KnitVersionedFile(self.filename, self.transport,
1053
factory=factory, index=index, access_method=access)
1552
1055
def insert_record_stream(self, stream):
1553
"""Insert a record stream into this container.
1056
"""Insert a record stream into this versioned file.
1555
:param stream: A stream of records to insert.
1058
:param stream: A stream of records to insert.
1557
:seealso VersionedFiles.get_record_stream:
1060
:seealso VersionedFile.get_record_stream:
1559
1062
def get_adapter(adapter_key):
1654
1118
# deprecated format this is tolerable. It can be fixed if
1655
1119
# needed by in the kndx index support raising on a duplicate
1656
1120
# add with identical parents and options.
1657
access_memo = self._access.add_raw_records(
1658
[(record.key, len(bytes))], bytes)[0]
1659
index_entry = (record.key, options, access_memo, parents)
1121
access_memo = self._data.add_raw_records([len(bytes)], bytes)[0]
1122
index_entry = (record.key[0], options, access_memo, parents)
1660
1124
if 'fulltext' not in options:
1661
# Not a fulltext, so we need to make sure the compression
1662
# parent will also be present.
1663
# Note that pack backed knits don't need to buffer here
1664
# because they buffer all writes to the transaction level,
1665
# but we don't expose that difference at the index level. If
1666
# the query here has sufficient cost to show up in
1667
# profiling we should do that.
1669
# They're required to be physically in this
1670
# KnitVersionedFiles, not in a fallback.
1671
if not self._index.has_key(compression_parent):
1125
basis_parent = parents[0]
1126
if not self.has_version(basis_parent):
1672
1127
pending = buffered_index_entries.setdefault(
1673
compression_parent, [])
1674
1129
pending.append(index_entry)
1675
1130
buffered = True
1676
1131
if not buffered:
1677
self._index.add_records([index_entry])
1678
elif record.storage_kind == 'chunked':
1679
self.add_lines(record.key, parents,
1680
osutils.chunks_to_lines(record.get_bytes_as('chunked')))
1132
self._index.add_versions([index_entry])
1133
elif record.storage_kind == 'fulltext':
1134
self.add_lines(record.key[0], parents,
1135
split_lines(record.get_bytes_as('fulltext')))
1682
# Not suitable for direct insertion as a
1683
# delta, either because it's not the right format, or this
1684
# KnitVersionedFiles doesn't permit deltas (_max_delta_chain ==
1685
# 0) or because it depends on a base only present in the
1687
self._access.flush()
1689
# Try getting a fulltext directly from the record.
1690
bytes = record.get_bytes_as('fulltext')
1691
except errors.UnavailableRepresentation:
1692
adapter_key = record.storage_kind, 'fulltext'
1693
adapter = get_adapter(adapter_key)
1694
bytes = adapter.get_bytes(record)
1695
lines = split_lines(bytes)
1697
self.add_lines(record.key, parents, lines)
1137
adapter_key = record.storage_kind, 'fulltext'
1138
adapter = get_adapter(adapter_key)
1139
lines = split_lines(adapter.get_bytes(
1140
record, record.get_bytes_as(record.storage_kind)))
1142
self.add_lines(record.key[0], parents, lines)
1698
1143
except errors.RevisionAlreadyPresent:
1700
1145
# Add any records whose basis parent is now available.
1702
added_keys = [record.key]
1704
key = added_keys.pop(0)
1705
if key in buffered_index_entries:
1706
index_entries = buffered_index_entries[key]
1707
self._index.add_records(index_entries)
1709
[index_entry[0] for index_entry in index_entries])
1710
del buffered_index_entries[key]
1146
added_keys = [record.key[0]]
1148
key = added_keys.pop(0)
1149
if key in buffered_index_entries:
1150
index_entries = buffered_index_entries[key]
1151
self._index.add_versions(index_entries)
1153
[index_entry[0] for index_entry in index_entries])
1154
del buffered_index_entries[key]
1155
# If there were any deltas which had a missing basis parent, error.
1711
1156
if buffered_index_entries:
1712
# There were index entries buffered at the end of the stream,
1713
# So these need to be added (if the index supports holding such
1714
# entries for later insertion)
1716
for key in buffered_index_entries:
1717
index_entries = buffered_index_entries[key]
1718
all_entries.extend(index_entries)
1719
self._index.add_records(
1720
all_entries, missing_compression_parents=True)
1722
def get_missing_compression_parent_keys(self):
1723
"""Return an iterable of keys of missing compression parents.
1725
Check this after calling insert_record_stream to find out if there are
1726
any missing compression parents. If there are, the records that
1727
depend on them are not able to be inserted safely. For atomic
1728
KnitVersionedFiles built on packs, the transaction should be aborted or
1729
suspended - commit will fail at this point. Nonatomic knits will error
1730
earlier because they have no staging area to put pending entries into.
1732
return self._index.get_missing_compression_parents()
1734
def iter_lines_added_or_present_in_keys(self, keys, pb=None):
1735
"""Iterate over the lines in the versioned files from keys.
1737
This may return lines from other keys. Each item the returned
1738
iterator yields is a tuple of a line and a text version that that line
1739
is present in (not introduced in).
1741
Ordering of results is in whatever order is most suitable for the
1742
underlying storage format.
1744
If a progress bar is supplied, it may be used to indicate progress.
1745
The caller is responsible for cleaning up progress bars (because this
1749
* Lines are normalised by the underlying store: they will all have \\n
1751
* Lines are returned in arbitrary order.
1752
* If a requested key did not change any lines (or didn't have any
1753
lines), it may not be mentioned at all in the result.
1755
:param pb: Progress bar supplied by caller.
1756
:return: An iterator over (line, key).
1759
pb = ui.ui_factory.nested_progress_bar()
1765
# we don't care about inclusions, the caller cares.
1766
# but we need to setup a list of records to visit.
1767
# we need key, position, length
1769
build_details = self._index.get_build_details(keys)
1770
for key, details in build_details.iteritems():
1772
key_records.append((key, details[0]))
1773
records_iter = enumerate(self._read_records_iter(key_records))
1774
for (key_idx, (key, data, sha_value)) in records_iter:
1775
pb.update('Walking content', key_idx, total)
1776
compression_parent = build_details[key][1]
1777
if compression_parent is None:
1779
line_iterator = self._factory.get_fulltext_content(data)
1782
line_iterator = self._factory.get_linedelta_content(data)
1783
# Now that we are yielding the data for this key, remove it
1786
# XXX: It might be more efficient to yield (key,
1787
# line_iterator) in the future. However for now, this is a
1788
# simpler change to integrate into the rest of the
1789
# codebase. RBC 20071110
1790
for line in line_iterator:
1793
except errors.RetryWithNewPacks, e:
1794
self._access.reload_or_raise(e)
1795
# If there are still keys we've not yet found, we look in the fallback
1796
# vfs, and hope to find them there. Note that if the keys are found
1797
# but had no changes or no content, the fallback may not return
1799
if keys and not self._fallback_vfs:
1800
# XXX: strictly the second parameter is meant to be the file id
1801
# but it's not easily accessible here.
1802
raise RevisionNotPresent(keys, repr(self))
1803
for source in self._fallback_vfs:
1807
for line, key in source.iter_lines_added_or_present_in_keys(keys):
1808
source_keys.add(key)
1810
keys.difference_update(source_keys)
1811
pb.update('Walking content', total, total)
1813
    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks
1157
raise errors.RevisionNotPresent(buffered_index_entries.keys()[0],
1161
"""See VersionedFile.versions."""
1162
if 'evil' in debug.debug_flags:
1163
trace.mutter_callsite(2, "versions scales with size of history")
1164
return self._index.get_versions()
1166
def has_version(self, version_id):
1167
"""See VersionedFile.has_version."""
1168
if 'evil' in debug.debug_flags:
1169
trace.mutter_callsite(2, "has_version is a LBYL scenario")
1170
return self._index.has_version(version_id)
1172
__contains__ = has_version
1822
1174
def _merge_annotations(self, content, parents, parent_texts={},
1823
1175
delta=None, annotated=None,
1824
1176
left_matching_blocks=None):
1825
"""Merge annotations for content and generate deltas.
1827
This is done by comparing the annotations based on changes to the text
1828
and generating a delta on the resulting full texts. If annotations are
1829
not being created then a simple delta is created.
        """Merge annotations for content.  This is done by comparing
        the annotations based on changes to the text.
1831
1180
if left_matching_blocks is not None:
1832
1181
delta_seq = diff._PrematchedMatcher(left_matching_blocks)
1834
1183
delta_seq = None
1836
for parent_key in parents:
1837
merge_content = self._get_content(parent_key, parent_texts)
1838
if (parent_key == parents[0] and delta_seq is not None):
1185
for parent_id in parents:
1186
merge_content = self._get_content(parent_id, parent_texts)
1187
if (parent_id == parents[0] and delta_seq is not None):
1839
1188
seq = delta_seq
1841
1190
seq = patiencediff.PatienceSequenceMatcher(
1864
1205
None, old_texts, new_texts)
1865
1206
return self._make_line_delta(delta_seq, content)
1867
def _parse_record(self, version_id, data):
1868
"""Parse an original format knit record.
1870
These have the last element of the key only present in the stored data.
1872
rec, record_contents = self._parse_record_unchecked(data)
1873
self._check_header_version(rec, version_id)
1874
return record_contents, rec[3]
1876
def _parse_record_header(self, key, raw_data):
1877
"""Parse a record header for consistency.
1879
:return: the header and the decompressor stream.
1880
as (stream, header_record)
1882
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(raw_data))
1885
rec = self._check_header(key, df.readline())
1886
except Exception, e:
1887
raise KnitCorrupt(self,
1888
"While reading {%s} got %s(%s)"
1889
% (key, e.__class__.__name__, str(e)))
1892
def _parse_record_unchecked(self, data):
1894
# 4168 calls in 2880 217 internal
1895
# 4168 calls to _parse_record_header in 2121
1896
# 4168 calls to readlines in 330
1897
df = tuned_gzip.GzipFile(mode='rb', fileobj=StringIO(data))
1899
record_contents = df.readlines()
1900
except Exception, e:
1901
raise KnitCorrupt(self, "Corrupt compressed record %r, got %s(%s)" %
1902
(data, e.__class__.__name__, str(e)))
1903
header = record_contents.pop(0)
1904
rec = self._split_header(header)
1905
last_line = record_contents.pop()
1906
if len(record_contents) != int(rec[2]):
1907
raise KnitCorrupt(self,
1908
'incorrect number of lines %s != %s'
1909
' for version {%s} %s'
1910
% (len(record_contents), int(rec[2]),
1911
rec[1], record_contents))
1912
if last_line != 'end %s\n' % rec[1]:
1913
raise KnitCorrupt(self,
1914
'unexpected version end line %r, wanted %r'
1915
% (last_line, rec[1]))
1917
return rec, record_contents
1919
def _read_records_iter(self, records):
1920
"""Read text records from data file and yield result.
1922
The result will be returned in whatever is the fastest to read.
1923
Not by the order requested. Also, multiple requests for the same
1924
record will only yield 1 response.
1925
:param records: A list of (key, access_memo) entries
1926
:return: Yields (key, contents, digest) in the order
1927
read, not the order requested
1932
# XXX: This smells wrong, IO may not be getting ordered right.
1933
needed_records = sorted(set(records), key=operator.itemgetter(1))
1934
if not needed_records:
1937
# The transport optimizes the fetching as well
1938
# (ie, reads continuous ranges.)
1939
raw_data = self._access.get_raw_records(
1940
[index_memo for key, index_memo in needed_records])
1942
for (key, index_memo), data in \
1943
izip(iter(needed_records), raw_data):
1944
content, digest = self._parse_record(key[-1], data)
1945
yield key, content, digest
1947
def _read_records_iter_raw(self, records):
1948
"""Read text records from data file and yield raw data.
1950
        This unpacks enough of the text record to validate the id is
        as expected, but that's all.
1953
Each item the iterator yields is (key, bytes,
1954
expected_sha1_of_full_text).
1956
for key, data in self._read_records_iter_unchecked(records):
1957
# validate the header (note that we can only use the suffix in
1958
# current knit records).
1959
df, rec = self._parse_record_header(key, data)
1961
yield key, data, rec[3]
1963
def _read_records_iter_unchecked(self, records):
1964
"""Read text records from data file and yield raw data.
1966
No validation is done.
1968
Yields tuples of (key, data).
1970
# setup an iterator of the external records:
1971
# uses readv so nice and fast we hope.
1973
# grab the disk data needed.
1974
needed_offsets = [index_memo for key, index_memo
1976
raw_records = self._access.get_raw_records(needed_offsets)
1978
for key, index_memo in records:
1979
data = raw_records.next()
1982
def _record_to_data(self, key, digest, lines, dense_lines=None):
1983
"""Convert key, digest, lines into a raw data block.
1985
:param key: The key of the record. Currently keys are always serialised
1986
using just the trailing component.
1987
:param dense_lines: The bytes of lines but in a denser form. For
1988
instance, if lines is a list of 1000 bytestrings each ending in \n,
1989
dense_lines may be a list with one line in it, containing all the
1990
1000's lines and their \n's. Using dense_lines if it is already
1991
known is a win because the string join to create bytes in this
1992
function spends less time resizing the final string.
1993
:return: (len, a StringIO instance with the raw data ready to read.)
1995
chunks = ["version %s %d %s\n" % (key[-1], len(lines), digest)]
1996
chunks.extend(dense_lines or lines)
1997
chunks.append("end %s\n" % key[-1])
1998
for chunk in chunks:
1999
if type(chunk) is not str:
2000
raise AssertionError(
2001
'data must be plain bytes was %s' % type(chunk))
2002
if lines and lines[-1][-1] != '\n':
2003
raise ValueError('corrupt lines value %r' % lines)
2004
compressed_bytes = tuned_gzip.chunks_to_gzip(chunks)
2005
return len(compressed_bytes), compressed_bytes
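
    # Example (illustrative, not from the original source): for key ('rev-1',),
    # a digest 'abc...', and lines ['hello\n', 'world\n'], the uncompressed
    # chunks are
    #   version rev-1 2 abc...\n
    #   hello\n
    #   world\n
    #   end rev-1\n
    # and the returned length is that of their gzipped form.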
2007
def _split_header(self, line):
2010
raise KnitCorrupt(self,
2011
'unexpected number of elements in record header')
2015
"""See VersionedFiles.keys."""
2016
if 'evil' in debug.debug_flags:
2017
trace.mutter_callsite(2, "keys scales with size of history")
2018
sources = [self._index] + self._fallback_vfs
2020
for source in sources:
2021
result.update(source.keys())
2025
class _ContentMapGenerator(object):
2026
"""Generate texts or expose raw deltas for a set of texts."""
2028
def __init__(self, ordering='unordered'):
2029
self._ordering = ordering
2031
def _get_content(self, key):
2032
"""Get the content object for key."""
2033
# Note that _get_content is only called when the _ContentMapGenerator
2034
# has been constructed with just one key requested for reconstruction.
2035
if key in self.nonlocal_keys:
2036
record = self.get_record_stream().next()
2037
# Create a content object on the fly
2038
lines = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
2039
return PlainKnitContent(lines, record.key)
1208
    def _make_line_delta(self, delta_seq, new_content):
        """Generate a line delta from delta_seq and new_content."""
        diff_hunks = []
        for op in delta_seq.get_opcodes():
            if op[0] == 'equal':
                continue
            diff_hunks.append((op[1], op[2], op[4]-op[3], new_content._lines[op[3]:op[4]]))
        return diff_hunks
1217
def _get_components_positions(self, version_ids):
1218
"""Produce a map of position data for the components of versions.
1220
This data is intended to be used for retrieving the knit records.
1222
A dict of version_id to (record_details, index_memo, next, parents) is
1224
method is the way referenced data should be applied.
1225
index_memo is the handle to pass to the data access to actually get the
1227
next is the build-parent of the version, or None for fulltexts.
1228
parents is the version_ids of the parents of this version
1231
pending_components = version_ids
1232
while pending_components:
1233
build_details = self._index.get_build_details(pending_components)
1234
current_components = set(pending_components)
1235
pending_components = set()
1236
for version_id, details in build_details.iteritems():
1237
(index_memo, compression_parent, parents,
1238
record_details) = details
1239
method = record_details[0]
1240
if compression_parent is not None:
1241
pending_components.add(compression_parent)
1242
component_data[version_id] = (record_details, index_memo,
1244
missing = current_components.difference(build_details)
1246
raise errors.RevisionNotPresent(missing.pop(), self.filename)
1247
return component_data
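# Sketch (assumption): the shape of the transitive-closure loop above, with a
# generic 'get_build_details' callable standing in for self._index. It keeps
# asking for build details of whatever compression parents are still unknown
# until no new components turn up.
def _sketch_components_positions(version_ids, get_build_details):
    component_data = {}
    pending = set(version_ids)
    while pending:
        details = get_build_details(pending)
        pending = set()
        for version_id, (index_memo, compression_parent,
                         parents, record_details) in details.items():
            if (compression_parent is not None
                    and compression_parent not in component_data):
                pending.add(compression_parent)
            component_data[version_id] = (record_details, index_memo,
                                          compression_parent)
    return component_data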
1249
def _get_content(self, version_id, parent_texts={}):
1250
"""Returns a content object that makes up the specified
1252
cached_version = parent_texts.get(version_id, None)
1253
if cached_version is not None:
1254
if not self.has_version(version_id):
1255
raise RevisionNotPresent(version_id, self.filename)
1256
return cached_version
1258
text_map, contents_map = self._get_content_maps([version_id])
1259
return contents_map[version_id]
1261
def _check_versions_present(self, version_ids):
1262
"""Check that all specified versions are present."""
1263
self._index.check_versions_present(version_ids)
1265
def _add_lines_with_ghosts(self, version_id, parents, lines, parent_texts,
1266
nostore_sha, random_id, check_content, left_matching_blocks):
1267
"""See VersionedFile.add_lines_with_ghosts()."""
1268
self._check_add(version_id, lines, random_id, check_content)
1269
return self._add(version_id, lines, parents, self.delta,
1270
parent_texts, left_matching_blocks, nostore_sha, random_id)
1272
def _add_lines(self, version_id, parents, lines, parent_texts,
1273
left_matching_blocks, nostore_sha, random_id, check_content):
1274
"""See VersionedFile.add_lines."""
1275
self._check_add(version_id, lines, random_id, check_content)
1276
self._check_versions_present(parents)
1277
return self._add(version_id, lines[:], parents, self.delta,
1278
parent_texts, left_matching_blocks, nostore_sha, random_id)
1280
def _check_add(self, version_id, lines, random_id, check_content):
1281
"""check that version_id and lines are safe to add."""
1282
if contains_whitespace(version_id):
1283
raise InvalidRevisionId(version_id, self.filename)
1284
self.check_not_reserved_id(version_id)
1285
# Technically this could be avoided if we are happy to allow duplicate
1286
# id insertion when things other than bzr core insert texts, but it
1287
# seems useful for folk using the knit api directly to have some safety
1288
# blanket that we can disable.
1289
if not random_id and self.has_version(version_id):
1290
raise RevisionAlreadyPresent(version_id, self.filename)
1292
self._check_lines_not_unicode(lines)
1293
self._check_lines_are_lines(lines)
1295
def _add(self, version_id, lines, parents, delta, parent_texts,
1296
left_matching_blocks, nostore_sha, random_id):
1297
"""Add a set of lines on top of version specified by parents.
1299
If delta is true, compress the text as a line-delta against
1302
Any versions not present will be converted into ghosts.
1304
        # first thing, if the content is something we don't need to store,
        # find that out early.
        line_bytes = ''.join(lines)
        digest = sha_string(line_bytes)
        if nostore_sha == digest:
            raise errors.ExistingContent
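# Sketch (assumption): the nostore_sha check above in isolation - compute the
# sha1 of the joined lines and compare against the digest the caller supplied.
# The helper name is hypothetical; a True result means the caller already has
# an identical text and nothing new needs storing.
from hashlib import sha1 as _sha1


def _sketch_nostore_check(lines, nostore_sha):
    digest = _sha1(''.join(lines)).hexdigest()
    return digest == nostore_sha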
1311
present_parents = []
1312
if parent_texts is None:
1314
for parent in parents:
1315
if self.has_version(parent):
1316
present_parents.append(parent)
1318
# can only compress against the left most present parent.
1320
(len(present_parents) == 0 or
1321
present_parents[0] != parents[0])):
1324
text_length = len(line_bytes)
1327
if lines[-1][-1] != '\n':
1328
# copy the contents of lines.
1330
options.append('no-eol')
1331
lines[-1] = lines[-1] + '\n'
1335
            # To speed the extract of texts the delta chain is limited
            # to a fixed number of deltas. This should minimize both
            # I/O and the time spent applying deltas.
            delta = self._check_should_delta(present_parents)
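# Sketch (assumption): the spirit of _check_should_delta - walk the would-be
# basis chain and refuse to delta if a fulltext is not reached within
# max_delta_chain steps. 'get_method' and 'get_compression_parent' are
# hypothetical callables standing in for the index lookups the real code does.
def _sketch_should_delta(first_parent, get_method, get_compression_parent,
                         max_delta_chain=200):
    version = first_parent
    for count in range(max_delta_chain):
        if get_method(version) == 'fulltext':
            return True  # short enough chain: storing a delta is worthwhile
        version = get_compression_parent(version)
        if version is None:
            return True  # no further basis: the parent is effectively a fulltext
    return False  # chain too long: store a fulltext instead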
1340
assert isinstance(version_id, str)
1341
content = self.factory.make(lines, version_id)
1342
if delta or (self.factory.annotated and len(present_parents) > 0):
1343
# Merge annotations from parent texts if needed.
1344
delta_hunks = self._merge_annotations(content, present_parents,
1345
parent_texts, delta, self.factory.annotated,
1346
left_matching_blocks)
1349
options.append('line-delta')
1350
store_lines = self.factory.lower_line_delta(delta_hunks)
1351
size, bytes = self._data._record_to_data(version_id, digest,
2041
# local keys we can ask for directly
2042
return self._get_one_work(key)
2044
def get_record_stream(self):
2045
"""Get a record stream for the keys requested during __init__."""
2046
for record in self._work():
2050
"""Produce maps of text and KnitContents as dicts.
1354
options.append('fulltext')
1355
# isinstance is slower and we have no hierarchy.
1356
if self.factory.__class__ == KnitPlainFactory:
1357
# Use the already joined bytes saving iteration time in
1359
size, bytes = self._data._record_to_data(version_id, digest,
1360
lines, [line_bytes])
1362
# get mixed annotation + content and feed it into the
1364
store_lines = self.factory.lower_fulltext(content)
1365
size, bytes = self._data._record_to_data(version_id, digest,
1368
access_memo = self._data.add_raw_records([size], bytes)[0]
1369
self._index.add_versions(
1370
((version_id, options, access_memo, parents),),
1371
random_id=random_id)
1372
return digest, text_length, content
1374
def check(self, progress_bar=None):
1375
"""See VersionedFile.check()."""
1376
# This doesn't actually test extraction of everything, but that will
1377
# impact 'bzr check' substantially, and needs to be integrated with
1378
# care. However, it does check for the obvious problem of a delta with
1380
versions = self.versions()
1381
parent_map = self.get_parent_map(versions)
1382
for version in versions:
1383
if self._index.get_method(version) != 'fulltext':
1384
compression_parent = parent_map[version][0]
1385
if compression_parent not in parent_map:
1386
raise errors.KnitCorrupt(self,
1387
"Missing basis parent %s for %s" % (
1388
compression_parent, version))
1390
def get_lines(self, version_id):
1391
"""See VersionedFile.get_lines()."""
1392
return self.get_line_list([version_id])[0]
1394
def _get_record_map(self, version_ids):
1395
"""Produce a dictionary of knit records.
1397
:return: {version_id:(record, record_details, digest, next)}
1399
data returned from read_records
1401
opaque information to pass to parse_record
1403
SHA1 digest of the full text after all steps are done
1405
build-parent of the version, i.e. the leftmost ancestor.
1406
Will be None if the record is not a delta.
1408
position_map = self._get_components_positions(version_ids)
1409
# c = component_id, r = record_details, i_m = index_memo, n = next
1410
records = [(c, i_m) for c, (r, i_m, n)
1411
in position_map.iteritems()]
1413
for component_id, record, digest in \
1414
self._data.read_records_iter(records):
1415
(record_details, index_memo, next) = position_map[component_id]
1416
record_map[component_id] = record, record_details, digest, next
1420
def get_text(self, version_id):
1421
"""See VersionedFile.get_text"""
1422
return self.get_texts([version_id])[0]
1424
def get_texts(self, version_ids):
1425
return [''.join(l) for l in self.get_line_list(version_ids)]
1427
def get_line_list(self, version_ids):
1428
"""Return the texts of listed versions as a list of strings."""
1429
for version_id in version_ids:
1430
self.check_not_reserved_id(version_id)
1431
text_map, content_map = self._get_content_maps(version_ids)
1432
return [text_map[v] for v in version_ids]
1434
_get_lf_split_line_list = get_line_list
1436
def _get_content_maps(self, version_ids):
1437
"""Produce maps of text and KnitContents
2052
1439
:return: (text_map, content_map) where text_map contains the texts for
2053
the requested versions and content_map contains the KnitContents.
1441
Both dicts take version_ids as their keys.
2055
# NB: By definition we never need to read remote sources unless texts
2056
# are requested from them: we don't delta across stores - and we
2057
# explicitly do not want to, to prevent data loss situations.
2058
if self.global_map is None:
2059
self.global_map = self.vf.get_parent_map(self.keys)
2060
nonlocal_keys = self.nonlocal_keys
2062
missing_keys = set(nonlocal_keys)
2063
# Read from remote versioned file instances and provide to our caller.
2064
for source in self.vf._fallback_vfs:
2065
if not missing_keys:
2067
# Loop over fallback repositories asking them for texts - ignore
2068
# any missing from a particular fallback.
2069
for record in source.get_record_stream(missing_keys,
2070
self._ordering, True):
2071
if record.storage_kind == 'absent':
2072
# Not in this particular stream, may be in one of the
2073
# other fallback vfs objects.
2075
missing_keys.remove(record.key)
2078
if self._raw_record_map is None:
2079
raise AssertionError('_raw_record_map should have been filled')
2081
for key in self.keys:
2082
if key in self.nonlocal_keys:
2084
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2087
def _get_one_work(self, requested_key):
2088
# Now, if we have calculated everything already, just return the
2090
if requested_key in self._contents_map:
2091
return self._contents_map[requested_key]
2092
# To simplify things, parse everything at once - code that wants one text
2093
# probably wants them all.
2094
1443
# FUTURE: This function could be improved for the 'extract many' case
2095
1444
# by tracking each component and only doing the copy when the number of
2096
1445
# children that need to apply deltas to it is > 1 or it is part of the
2097
1446
# final output.
2098
multiple_versions = len(self.keys) != 1
2099
if self._record_map is None:
2100
self._record_map = self.vf._raw_map_to_record_map(
2101
self._raw_record_map)
2102
record_map = self._record_map
2103
# raw_record_map is key:
2104
# Have read and parsed records at this point.
2105
for key in self.keys:
2106
if key in self.nonlocal_keys:
1447
version_ids = list(version_ids)
1448
multiple_versions = len(version_ids) != 1
1449
record_map = self._get_record_map(version_ids)
1454
for version_id in version_ids:
2109
1455
components = []
2111
1457
while cursor is not None:
2113
record, record_details, digest, next = record_map[cursor]
2115
raise RevisionNotPresent(cursor, self)
1458
record, record_details, digest, next = record_map[cursor]
2116
1459
components.append((cursor, record, record_details, digest))
1460
if cursor in content_map:
2118
if cursor in self._contents_map:
2119
# no need to plan further back
2120
components.append((cursor, None, None, None))
2124
1465
for (component_id, record, record_details,
2125
1466
digest) in reversed(components):
2126
if component_id in self._contents_map:
2127
content = self._contents_map[component_id]
1467
if component_id in content_map:
1468
content = content_map[component_id]
2129
content, delta = self._factory.parse_record(key[-1],
1470
content, delta = self.factory.parse_record(version_id,
2130
1471
record, record_details, content,
2131
1472
copy_base_content=multiple_versions)
2132
1473
if multiple_versions:
2133
self._contents_map[component_id] = content
1474
content_map[component_id] = content
1476
content.cleanup_eol(copy_on_mutate=multiple_versions)
1477
final_content[version_id] = content
2135
1479
# digest here is the digest from the last applied component.
2136
1480
text = content.text()
2137
1481
actual_sha = sha_strings(text)
2138
1482
if actual_sha != digest:
2139
raise SHA1KnitCorrupt(self, actual_sha, digest, key, text)
2140
if multiple_versions:
2141
return self._contents_map[requested_key]
2145
def _wire_bytes(self):
2146
"""Get the bytes to put on the wire for 'key'.
2148
The first collection of bytes asked for returns the serialised
2149
raw_record_map and the additional details (key, parent) for key.
2150
Subsequent calls return just the additional details (key, parent).
2151
The wire storage_kind given for the first key is 'knit-delta-closure',
2152
For subsequent keys it is 'knit-delta-closure-ref'.
2154
:param key: A key from the content generator.
2155
:return: Bytes to put on the wire.
2158
# kind marker for dispatch on the far side,
2159
lines.append('knit-delta-closure')
2161
if self.vf._factory.annotated:
2162
lines.append('annotated')
2165
# then the list of keys
2166
lines.append('\t'.join(['\x00'.join(key) for key in self.keys
2167
if key not in self.nonlocal_keys]))
2168
# then the _raw_record_map in serialised form:
2170
# for each item in the map:
2172
# 1 line with parents if the key is to be yielded (None: for None, '' for ())
2173
# one line with method
2174
# one line with noeol
2175
# one line with next ('' for None)
2176
# one line with byte count of the record bytes
2178
for key, (record_bytes, (method, noeol), next) in \
2179
self._raw_record_map.iteritems():
2180
key_bytes = '\x00'.join(key)
2181
parents = self.global_map.get(key, None)
2183
parent_bytes = 'None:'
2185
parent_bytes = '\t'.join('\x00'.join(key) for key in parents)
2186
method_bytes = method
2192
next_bytes = '\x00'.join(next)
2195
map_byte_list.append('%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
2196
key_bytes, parent_bytes, method_bytes, noeol_bytes, next_bytes,
2197
len(record_bytes), record_bytes))
2198
map_bytes = ''.join(map_byte_list)
2199
lines.append(map_bytes)
2200
bytes = '\n'.join(lines)
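# Sketch (assumption): serialising one raw_record_map entry in the
# 'knit-delta-closure' layout described above. The field values are
# hypothetical, and the exact encoding of the noeol flag is an assumption;
# the real code also prepends the kind marker, the optional 'annotated'
# flag and the tab-separated key list.
def _sketch_serialise_entry(key, parents, method, noeol, next_key,
                            record_bytes):
    key_bytes = '\x00'.join(key)
    if parents is None:
        parent_bytes = 'None:'
    else:
        parent_bytes = '\t'.join('\x00'.join(p) for p in parents)
    noeol_bytes = str(noeol)  # representation of the flag is an assumption
    next_bytes = '\x00'.join(next_key) if next_key is not None else ''
    return '%s\n%s\n%s\n%s\n%s\n%d\n%s' % (
        key_bytes, parent_bytes, method, noeol_bytes, next_bytes,
        len(record_bytes), record_bytes)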
2204
class _VFContentMapGenerator(_ContentMapGenerator):
2205
"""Content map generator reading from a VersionedFiles object."""
2207
def __init__(self, versioned_files, keys, nonlocal_keys=None,
2208
global_map=None, raw_record_map=None, ordering='unordered'):
2209
"""Create a _ContentMapGenerator.
2211
:param versioned_files: The versioned files that the texts are being
2213
:param keys: The keys to produce content maps for.
2214
:param nonlocal_keys: An iterable of keys (possibly intersecting keys)
2215
which are known to not be in this knit, but rather in one of the
2217
:param global_map: The result of get_parent_map(keys) (or a supermap).
2218
This is required if get_record_stream() is to be used.
2219
:param raw_record_map: An unparsed raw record map to use for answering
2222
_ContentMapGenerator.__init__(self, ordering=ordering)
2223
# The vf to source data from
2224
self.vf = versioned_files
2226
self.keys = list(keys)
2227
# Keys known to be in fallback vfs objects
2228
if nonlocal_keys is None:
2229
self.nonlocal_keys = set()
2231
self.nonlocal_keys = frozenset(nonlocal_keys)
2232
# Parents data for keys to be returned in get_record_stream
2233
self.global_map = global_map
2234
# The chunked lists for self.keys in text form
2236
# A cache of KnitContent objects used in extracting texts.
2237
self._contents_map = {}
2238
# All the knit records needed to assemble the requested keys as full
2240
self._record_map = None
2241
if raw_record_map is None:
2242
self._raw_record_map = self.vf._get_record_map_unparsed(keys,
2245
self._raw_record_map = raw_record_map
2246
# the factory for parsing records
2247
self._factory = self.vf._factory
2250
class _NetworkContentMapGenerator(_ContentMapGenerator):
2251
"""Content map generator sourced from a network stream."""
2253
def __init__(self, bytes, line_end):
2254
"""Construct a _NetworkContentMapGenerator from a bytes block."""
2256
self.global_map = {}
2257
self._raw_record_map = {}
2258
self._contents_map = {}
2259
self._record_map = None
2260
self.nonlocal_keys = []
2261
# Get access to record parsing facilities
2262
self.vf = KnitVersionedFiles(None, None)
2265
line_end = bytes.find('\n', start)
2266
line = bytes[start:line_end]
2267
start = line_end + 1
2268
if line == 'annotated':
2269
self._factory = KnitAnnotateFactory()
2271
self._factory = KnitPlainFactory()
2272
# list of keys to emit in get_record_stream
2273
line_end = bytes.find('\n', start)
2274
line = bytes[start:line_end]
2275
start = line_end + 1
2277
tuple(segment.split('\x00')) for segment in line.split('\t')
2279
# now a loop until the end. XXX: It would be nice if this was just a
2280
# bunch of the same records as get_record_stream(..., False) gives, but
2281
# there is a decent sized gap stopping that at the moment.
2285
line_end = bytes.find('\n', start)
2286
key = tuple(bytes[start:line_end].split('\x00'))
2287
start = line_end + 1
2288
# 1 line with parents (None: for None, '' for ())
2289
line_end = bytes.find('\n', start)
2290
line = bytes[start:line_end]
2295
[tuple(segment.split('\x00')) for segment in line.split('\t')
2297
self.global_map[key] = parents
2298
start = line_end + 1
2299
# one line with method
2300
line_end = bytes.find('\n', start)
2301
line = bytes[start:line_end]
2303
start = line_end + 1
2304
# one line with noeol
2305
line_end = bytes.find('\n', start)
2306
line = bytes[start:line_end]
2308
start = line_end + 1
2309
# one line with next ('' for None)
2310
line_end = bytes.find('\n', start)
2311
line = bytes[start:line_end]
2315
next = tuple(bytes[start:line_end].split('\x00'))
2316
start = line_end + 1
2317
# one line with byte count of the record bytes
2318
line_end = bytes.find('\n', start)
2319
line = bytes[start:line_end]
2321
start = line_end + 1
2323
record_bytes = bytes[start:start+count]
2324
start = start + count
2326
self._raw_record_map[key] = (record_bytes, (method, noeol), next)
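# Sketch (assumption): reading one per-record stanza back out of the byte
# block - a compact restatement of the find('\n') walking above for a single
# entry starting at 'start'. How parents, noeol and next are decoded from
# their lines is left to the caller, mirroring the loop above.
def _sketch_parse_entry(data, start):
    def take_line(pos):
        end = data.find('\n', pos)
        return data[pos:end], end + 1
    key_line, start = take_line(start)
    key = tuple(key_line.split('\x00'))
    parents_line, start = take_line(start)
    method, start = take_line(start)
    noeol_line, start = take_line(start)
    next_line, start = take_line(start)
    count_line, start = take_line(start)
    count = int(count_line)
    record_bytes = data[start:start + count]
    start += count
    return key, parents_line, method, noeol_line, next_line, record_bytes, start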
2328
    def get_record_stream(self):
        """Get a record stream for keys requested by the bytestream."""
2331
for key in self.keys:
2332
yield LazyKnitContentFactory(key, self.global_map[key], self, first)
2335
def _wire_bytes(self):
2339
class _KndxIndex(object):
2340
"""Manages knit index files
2342
The index is kept in memory and read on startup, to enable
1483
                raise KnitCorrupt(self.filename,
                    'sha-1 %s of reconstructed text does not match'
                    ' expected sha-1 %s'
                    '\n for version %s' %
                    (actual_sha, digest, version_id))
1489
text_map[version_id] = text
1490
return text_map, final_content
1492
def iter_lines_added_or_present_in_versions(self, version_ids=None,
1494
"""See VersionedFile.iter_lines_added_or_present_in_versions()."""
1495
if version_ids is None:
1496
version_ids = self.versions()
1498
pb = progress.DummyProgress()
1499
# we don't care about inclusions, the caller cares.
1500
# but we need to setup a list of records to visit.
1501
# we need version_id, position, length
1502
version_id_records = []
1503
requested_versions = set(version_ids)
1504
# filter for available versions
1505
for version_id in requested_versions:
1506
if not self.has_version(version_id):
1507
raise RevisionNotPresent(version_id, self.filename)
1508
# get an in-component-order queue:
1509
for version_id in self.versions():
1510
if version_id in requested_versions:
1511
index_memo = self._index.get_position(version_id)
1512
version_id_records.append((version_id, index_memo))
1514
total = len(version_id_records)
1515
for version_idx, (version_id, data, sha_value) in \
1516
enumerate(self._data.read_records_iter(version_id_records)):
1517
pb.update('Walking content.', version_idx, total)
1518
method = self._index.get_method(version_id)
1520
assert method in ('fulltext', 'line-delta')
1521
if method == 'fulltext':
1522
line_iterator = self.factory.get_fulltext_content(data)
1524
line_iterator = self.factory.get_linedelta_content(data)
1525
# XXX: It might be more efficient to yield (version_id,
1526
# line_iterator) in the future. However for now, this is a simpler
1527
# change to integrate into the rest of the codebase. RBC 20071110
1528
for line in line_iterator:
1529
yield line, version_id
1531
pb.update('Walking content.', total, total)
1533
def num_versions(self):
1534
"""See VersionedFile.num_versions()."""
1535
return self._index.num_versions()
1537
__len__ = num_versions
1539
def annotate(self, version_id):
1540
"""See VersionedFile.annotate."""
1541
return self.factory.annotate(self, version_id)
1543
def get_parent_map(self, version_ids):
1544
"""See VersionedFile.get_parent_map."""
1545
return self._index.get_parent_map(version_ids)
1547
def get_ancestry(self, versions, topo_sorted=True):
1548
"""See VersionedFile.get_ancestry."""
1549
if isinstance(versions, basestring):
1550
versions = [versions]
1553
return self._index.get_ancestry(versions, topo_sorted)
1555
def get_ancestry_with_ghosts(self, versions):
1556
"""See VersionedFile.get_ancestry_with_ghosts."""
1557
if isinstance(versions, basestring):
1558
versions = [versions]
1561
return self._index.get_ancestry_with_ghosts(versions)
1563
def plan_merge(self, ver_a, ver_b):
1564
"""See VersionedFile.plan_merge."""
1565
ancestors_b = set(self.get_ancestry(ver_b, topo_sorted=False))
1566
ancestors_a = set(self.get_ancestry(ver_a, topo_sorted=False))
1567
annotated_a = self.annotate(ver_a)
1568
annotated_b = self.annotate(ver_b)
1569
return merge._plan_annotate_merge(annotated_a, annotated_b,
1570
ancestors_a, ancestors_b)
1573
class _KnitComponentFile(object):
1574
"""One of the files used to implement a knit database"""
1576
def __init__(self, transport, filename, mode, file_mode=None,
1577
create_parent_dir=False, dir_mode=None):
1578
self._transport = transport
1579
self._filename = filename
1581
self._file_mode = file_mode
1582
self._dir_mode = dir_mode
1583
self._create_parent_dir = create_parent_dir
1584
self._need_to_create = False
1586
def _full_path(self):
1587
"""Return the full path to this file."""
1588
return self._transport.base + self._filename
1590
def check_header(self, fp):
1591
line = fp.readline()
1593
# An empty file can actually be treated as though the file doesn't
1595
raise errors.NoSuchFile(self._full_path())
1596
if line != self.HEADER:
1597
raise KnitHeaderError(badline=line,
1598
filename=self._transport.abspath(self._filename))
1601
return '%s(%s)' % (self.__class__.__name__, self._filename)
1604
class _KnitIndex(_KnitComponentFile):
1605
"""Manages knit index file.
1607
The index is already kept in memory and read on startup, to enable
2343
1608
fast lookups of revision information. The cursor of the index
2344
1609
file is always pointing to the end, making it easy to append
2387
1652
to ensure that records always start on new lines even if the last write was
interrupted. As a result it's normal for the last line in the index to be
missing a trailing newline. One can be added with no harmful effects.
2391
:ivar _kndx_cache: dict from prefix to the old state of KnitIndex objects,
2392
where prefix is e.g. the (fileid,) for .texts instances or () for
2393
constant-mapped things like .revisions, and the old state is
2394
tuple(history_vector, cache_dict). This is used to prevent having an
2395
ABI change with the C extension that reads .kndx files.
2398
1657
HEADER = "# bzr knit index 8\n"
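# Sketch (assumption): why each appended index line starts with '\n' (see
# add_records/add_versions below) - if a previous append was interrupted
# mid-line, the leading newline still pushes the new record onto a fresh
# line, so at worst the torn partial line is ignored when re-parsed.
def _sketch_kndx_line(version_id, options, pos, size, parent_refs):
    return "\n%s %s %s %s %s :" % (
        version_id, ','.join(options), pos, size, parent_refs)

# e.g. _sketch_kndx_line('rev-1', ['fulltext'], 0, 123, '.rev-0')
#      -> '\nrev-1 fulltext 0 123 .rev-0 :'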
2400
def __init__(self, transport, mapper, get_scope, allow_writes, is_locked):
2401
"""Create a _KndxIndex on transport using mapper."""
2402
self._transport = transport
2403
self._mapper = mapper
2404
self._get_scope = get_scope
2405
self._allow_writes = allow_writes
2406
self._is_locked = is_locked
2408
self.has_graph = True
2410
def add_records(self, records, random_id=False, missing_compression_parents=False):
2411
"""Add multiple records to the index.
2413
:param records: a list of tuples:
2414
(key, options, access_memo, parents).
2415
:param random_id: If True the ids being added were randomly generated
2416
and no check for existence will be performed.
2417
:param missing_compression_parents: If True the records being added are
2418
only compressed against texts already in the index (or inside
2419
records). If False the records all refer to unavailable texts (or
2420
texts inside records) as compression parents.
2422
if missing_compression_parents:
2423
# It might be nice to get the edge of the records. But keys isn't
2425
keys = sorted(record[0] for record in records)
2426
raise errors.RevisionNotPresent(keys, self)
2428
for record in records:
2431
path = self._mapper.map(key) + '.kndx'
2432
path_keys = paths.setdefault(path, (prefix, []))
2433
path_keys[1].append(record)
2434
for path in sorted(paths):
2435
prefix, path_keys = paths[path]
2436
self._load_prefixes([prefix])
2438
orig_history = self._kndx_cache[prefix][1][:]
2439
orig_cache = self._kndx_cache[prefix][0].copy()
2442
for key, options, (_, pos, size), parents in path_keys:
2444
# kndx indices cannot be parentless.
2446
line = "\n%s %s %s %s %s :" % (
2447
key[-1], ','.join(options), pos, size,
2448
self._dictionary_compress(parents))
2449
if type(line) is not str:
2450
raise AssertionError(
2451
'data must be utf8 was %s' % type(line))
2453
self._cache_key(key, options, pos, size, parents)
2454
if len(orig_history):
2455
self._transport.append_bytes(path, ''.join(lines))
2457
self._init_index(path, lines)
2459
# If any problems happen, restore the original values and re-raise
2460
self._kndx_cache[prefix] = (orig_cache, orig_history)
2463
def scan_unvalidated_index(self, graph_index):
2464
"""See _KnitGraphIndex.scan_unvalidated_index."""
2465
# Because kndx files do not support atomic insertion via separate index
2466
# files, they do not support this method.
2467
raise NotImplementedError(self.scan_unvalidated_index)
2469
def get_missing_compression_parents(self):
2470
"""See _KnitGraphIndex.get_missing_compression_parents."""
2471
# Because kndx files do not support atomic insertion via separate index
2472
# files, they do not support this method.
2473
raise NotImplementedError(self.get_missing_compression_parents)
2475
def _cache_key(self, key, options, pos, size, parent_keys):
1659
# speed of knit parsing went from 280 ms to 280 ms with slots addition.
1660
# __slots__ = ['_cache', '_history', '_transport', '_filename']
1662
def _cache_version(self, version_id, options, pos, size, parents):
2476
1663
"""Cache a version record in the history array and index cache.
2478
1665
This is inlined into _load_data for performance. KEEP IN SYNC.
2479
1666
(It saves 60ms, 25% of the __init__ overhead on local 4000 record
2483
version_id = key[-1]
2484
# last-element only for compatibility with the C load_data.
2485
parents = tuple(parent[-1] for parent in parent_keys)
2486
for parent in parent_keys:
2487
if parent[:-1] != prefix:
2488
raise ValueError("mismatched prefixes for %r, %r" % (
2490
cache, history = self._kndx_cache[prefix]
2491
1669
# only want the _history index to reference the 1st index entry
2492
1670
# for version_id
2493
if version_id not in cache:
2494
index = len(history)
2495
history.append(version_id)
1671
if version_id not in self._cache:
1672
index = len(self._history)
1673
self._history.append(version_id)
2497
index = cache[version_id][5]
2498
cache[version_id] = (version_id,
1675
index = self._cache[version_id][5]
1676
self._cache[version_id] = (version_id,
2505
def check_header(self, fp):
2506
line = fp.readline()
2508
# An empty file can actually be treated as though the file doesn't
2510
raise errors.NoSuchFile(self)
2511
if line != self.HEADER:
2512
raise KnitHeaderError(badline=line, filename=self)
2514
def _check_read(self):
2515
if not self._is_locked():
2516
raise errors.ObjectNotLocked(self)
2517
if self._get_scope() != self._scope:
2520
1683
    def _check_write_ok(self):
        """Assert if writes are not permitted."""
2522
if not self._is_locked():
2523
raise errors.ObjectNotLocked(self)
2524
1684
if self._get_scope() != self._scope:
1685
raise errors.OutSideTransaction()
2526
1686
if self._mode != 'w':
2527
1687
raise errors.ReadOnlyObjectDirtiedError(self)
2529
def get_build_details(self, keys):
2530
"""Get the method, index_memo and compression parent for keys.
1689
def __init__(self, transport, filename, mode, create=False, file_mode=None,
1690
create_parent_dir=False, delay_create=False, dir_mode=None,
1692
_KnitComponentFile.__init__(self, transport, filename, mode,
1693
file_mode=file_mode,
1694
create_parent_dir=create_parent_dir,
1697
# position in _history is the 'official' index for a revision
1698
# but the values may have come from a newer entry.
1699
# so - wc -l of a knit index is != the number of unique names
1703
fp = self._transport.get(self._filename)
1705
# _load_data may raise NoSuchFile if the target knit is
1707
_load_data(self, fp)
1711
if mode != 'w' or not create:
1714
self._need_to_create = True
1716
self._transport.put_bytes_non_atomic(
1717
self._filename, self.HEADER, mode=self._file_mode)
1718
self._scope = get_scope()
1719
self._get_scope = get_scope
1721
def get_ancestry(self, versions, topo_sorted=True):
1722
"""See VersionedFile.get_ancestry."""
1723
# get a graph of all the mentioned versions:
1725
pending = set(versions)
1728
version = pending.pop()
1731
parents = [p for p in cache[version][4] if p in cache]
1733
raise RevisionNotPresent(version, self._filename)
1734
# if not completed and not a ghost
1735
pending.update([p for p in parents if p not in graph])
1736
graph[version] = parents
1739
return topo_sort(graph.items())
1741
def get_ancestry_with_ghosts(self, versions):
1742
"""See VersionedFile.get_ancestry_with_ghosts."""
1743
# get a graph of all the mentioned versions:
1744
self.check_versions_present(versions)
1747
pending = set(versions)
1749
version = pending.pop()
1751
parents = cache[version][4]
1757
pending.update([p for p in parents if p not in graph])
1758
graph[version] = parents
1759
return topo_sort(graph.items())
1761
def get_build_details(self, version_ids):
1762
"""Get the method, index_memo and compression parent for version_ids.
2532
1764
Ghosts are omitted from the result.
2534
:param keys: An iterable of keys.
2535
:return: A dict of key:(index_memo, compression_parent, parents,
1766
:param version_ids: An iterable of version_ids.
1767
:return: A dict of version_id:(index_memo, compression_parent,
1768
parents, record_details).
2538
1770
opaque structure to pass to read_records to extract the raw
2545
1777
extra information about the content which needs to be passed to
2546
1778
Factory.parse_record
2548
parent_map = self.get_parent_map(keys)
2551
if key not in parent_map:
2553
method = self.get_method(key)
2554
parents = parent_map[key]
1781
for version_id in version_ids:
1782
if version_id not in self._cache:
1783
# ghosts are omitted
1785
method = self.get_method(version_id)
1786
parents = self.get_parents_with_ghosts(version_id)
2555
1787
if method == 'fulltext':
2556
1788
compression_parent = None
2558
1790
compression_parent = parents[0]
2559
noeol = 'no-eol' in self.get_options(key)
2560
index_memo = self.get_position(key)
2561
result[key] = (index_memo, compression_parent,
1791
noeol = 'no-eol' in self.get_options(version_id)
1792
index_memo = self.get_position(version_id)
1793
result[version_id] = (index_memo, compression_parent,
2562
1794
parents, (method, noeol))
2565
def get_method(self, key):
2566
"""Return compression method of specified key."""
2567
options = self.get_options(key)
2568
if 'fulltext' in options:
2570
elif 'line-delta' in options:
2573
raise errors.KnitIndexUnknownMethod(self, options)
2575
def get_options(self, key):
2576
"""Return a list representing options.
2580
prefix, suffix = self._split_key(key)
2581
self._load_prefixes([prefix])
2583
return self._kndx_cache[prefix][0][suffix][1]
2585
raise RevisionNotPresent(key, self)
2587
def find_ancestry(self, keys):
2588
"""See CombinedGraphIndex.find_ancestry()"""
2589
prefixes = set(key[:-1] for key in keys)
2590
self._load_prefixes(prefixes)
2593
missing_keys = set()
2594
pending_keys = list(keys)
2595
# This assumes that keys will not reference parents in a different
2596
# prefix, which is accurate so far.
2598
key = pending_keys.pop()
2599
if key in parent_map:
2603
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
2605
missing_keys.add(key)
2607
parent_keys = tuple([prefix + (suffix,)
2608
for suffix in suffix_parents])
2609
parent_map[key] = parent_keys
2610
pending_keys.extend([p for p in parent_keys
2611
if p not in parent_map])
2612
return parent_map, missing_keys
2614
def get_parent_map(self, keys):
2615
"""Get a map of the parents of keys.
2617
:param keys: The keys to look up parents for.
2618
:return: A mapping from keys to parents. Absent keys are absent from
2621
# Parse what we need to up front, this potentially trades off I/O
2622
# locality (.kndx and .knit in the same block group for the same file
2623
# id) for less checking in inner loops.
2624
prefixes = set(key[:-1] for key in keys)
2625
self._load_prefixes(prefixes)
2630
suffix_parents = self._kndx_cache[prefix][0][key[-1]][4]
2634
result[key] = tuple(prefix + (suffix,) for
2635
suffix in suffix_parents)
2638
def get_position(self, key):
2639
"""Return details needed to access the version.
2641
:return: a tuple (key, data position, size) to hand to the access
2642
logic to get the record.
2644
prefix, suffix = self._split_key(key)
2645
self._load_prefixes([prefix])
2646
entry = self._kndx_cache[prefix][0][suffix]
2647
return key, entry[2], entry[3]
2649
has_key = _mod_index._has_key_from_parent_map
2651
def _init_index(self, path, extra_lines=[]):
2652
"""Initialize an index."""
2654
sio.write(self.HEADER)
2655
sio.writelines(extra_lines)
2657
self._transport.put_file_non_atomic(path, sio,
2658
create_parent_dir=True)
2659
# self._create_parent_dir)
2660
# mode=self._file_mode,
2661
# dir_mode=self._dir_mode)
2664
"""Get all the keys in the collection.
2666
The keys are not ordered.
2669
# Identify all key prefixes.
2670
# XXX: A bit hacky, needs polish.
2671
if type(self._mapper) is ConstantMapper:
2675
for quoted_relpath in self._transport.iter_files_recursive():
2676
path, ext = os.path.splitext(quoted_relpath)
2678
prefixes = [self._mapper.unmap(path) for path in relpaths]
2679
self._load_prefixes(prefixes)
2680
for prefix in prefixes:
2681
for suffix in self._kndx_cache[prefix][1]:
2682
result.add(prefix + (suffix,))
2685
def _load_prefixes(self, prefixes):
2686
"""Load the indices for prefixes."""
2688
for prefix in prefixes:
2689
if prefix not in self._kndx_cache:
2690
# the load_data interface writes to these variables.
2693
self._filename = prefix
2695
path = self._mapper.map(prefix) + '.kndx'
2696
fp = self._transport.get(path)
2698
# _load_data may raise NoSuchFile if the target knit is
2700
_load_data(self, fp)
2703
self._kndx_cache[prefix] = (self._cache, self._history)
2708
self._kndx_cache[prefix] = ({}, [])
2709
if type(self._mapper) is ConstantMapper:
2710
# preserve behaviour for revisions.kndx etc.
2711
self._init_index(path)
2716
missing_keys = _mod_index._missing_keys_from_parent_map
2718
def _partition_keys(self, keys):
2719
"""Turn keys into a dict of prefix:suffix_list."""
2722
prefix_keys = result.setdefault(key[:-1], [])
2723
prefix_keys.append(key[-1])
2726
    def _dictionary_compress(self, keys):
        """Dictionary compress keys.

        :param keys: The keys to generate references to.
        :return: A string representation of keys. keys which are present are
            dictionary compressed, and others are emitted as fulltext with a
            '.' prefix.
        """
1797
def num_versions(self):
1798
return len(self._history)
1800
__len__ = num_versions
1802
def get_versions(self):
1803
"""Get all the versions in the file. not topologically sorted."""
1804
return self._history
1806
def _version_list_to_index(self, versions):
2736
1807
result_list = []
2737
prefix = keys[0][:-1]
2738
cache = self._kndx_cache[prefix][0]
2740
if key[:-1] != prefix:
2741
# kndx indices cannot refer across partitioned storage.
2742
raise ValueError("mismatched prefixes for %r" % keys)
2743
if key[-1] in cache:
1809
for version in versions:
1810
if version in cache:
2744
1811
# -- inlined lookup() --
2745
result_list.append(str(cache[key[-1]][5]))
1812
result_list.append(str(cache[version][5]))
2746
1813
# -- end lookup () --
2748
result_list.append('.' + key[-1])
1815
result_list.append('.' + version)
2749
1816
return ' '.join(result_list)
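# Sketch (assumption): what the dictionary compression above produces. Given
# a cache in which 'rev-1' sits at history index 5 and 'ghost-rev' is absent,
# the parent list ['rev-1', 'ghost-rev'] is emitted as '5 .ghost-rev' -
# present parents become their integer history index, absent ones are written
# in full with a '.' prefix. 'cache_index_of' is a hypothetical lookup.
def _sketch_dictionary_compress(parent_ids, cache_index_of):
    result_list = []
    for parent in parent_ids:
        idx = cache_index_of(parent)
        if idx is not None:
            result_list.append(str(idx))
        else:
            result_list.append('.' + parent)
    return ' '.join(result_list)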
2751
def _reset_cache(self):
2752
# Possibly this should be a LRU cache. A dictionary from key_prefix to
2753
# (cache_dict, history_vector) for parsed kndx files.
2754
self._kndx_cache = {}
2755
self._scope = self._get_scope()
2756
allow_writes = self._allow_writes()
2762
def _sort_keys_by_io(self, keys, positions):
2763
"""Figure out an optimal order to read the records for the given keys.
2765
Sort keys, grouped by index and sorted by position.
2767
:param keys: A list of keys whose records we want to read. This will be
2769
:param positions: A dict, such as the one returned by
2770
_get_components_positions()
2773
def get_sort_key(key):
2774
index_memo = positions[key][1]
2775
# Group by prefix and position. index_memo[0] is the key, so it is
2776
# (file_id, revision_id) and we don't want to sort on revision_id,
2777
# index_memo[1] is the position, and index_memo[2] is the size,
2778
# which doesn't matter for the sort
2779
return index_memo[0][:-1], index_memo[1]
2780
return keys.sort(key=get_sort_key)
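# Sketch (assumption): the effect of the sort above on a kndx-backed index.
# With positions[key][1] being (key, offset, size), keys sharing a prefix are
# grouped together and read in ascending file offset order, which keeps the
# readv() requests close to sequential.
#
#     keys = [('f1', 'r2'), ('f2', 'r1'), ('f1', 'r1')]
#     # offsets: f1/r1 -> 0, f1/r2 -> 700, f2/r1 -> 0
#     # after _sort_keys_by_io: [('f1', 'r1'), ('f1', 'r2'), ('f2', 'r1')]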
2782
_get_total_build_size = _get_total_build_size
2784
def _split_key(self, key):
2785
"""Split key into a prefix and suffix."""
2786
return key[:-1], key[-1]
2789
class _KeyRefs(object):
2791
def __init__(self, track_new_keys=False):
2792
# dict mapping 'key' to 'set of keys referring to that key'
2795
# set remembering all new keys
2796
self.new_keys = set()
2798
self.new_keys = None
2804
self.new_keys.clear()
2806
def add_references(self, key, refs):
2807
# Record the new references
2808
for referenced in refs:
1818
def add_version(self, version_id, options, index_memo, parents):
1819
"""Add a version record to the index."""
1820
self.add_versions(((version_id, options, index_memo, parents),))
1822
def add_versions(self, versions, random_id=False):
1823
"""Add multiple versions to the index.
1825
:param versions: a list of tuples:
1826
(version_id, options, pos, size, parents).
1827
:param random_id: If True the ids being added were randomly generated
1828
and no check for existence will be performed.
1831
orig_history = self._history[:]
1832
orig_cache = self._cache.copy()
1835
for version_id, options, (index, pos, size), parents in versions:
1836
line = "\n%s %s %s %s %s :" % (version_id,
1840
self._version_list_to_index(parents))
1841
assert isinstance(line, str), \
1842
'content must be utf-8 encoded: %r' % (line,)
1844
self._cache_version(version_id, options, pos, size, tuple(parents))
1845
if not self._need_to_create:
1846
self._transport.append_bytes(self._filename, ''.join(lines))
1849
sio.write(self.HEADER)
1850
sio.writelines(lines)
1852
self._transport.put_file_non_atomic(self._filename, sio,
1853
create_parent_dir=self._create_parent_dir,
1854
mode=self._file_mode,
1855
dir_mode=self._dir_mode)
1856
self._need_to_create = False
1858
# If any problems happen, restore the original values and re-raise
1859
self._history = orig_history
1860
self._cache = orig_cache
1863
def has_version(self, version_id):
1864
"""True if the version is in the index."""
1865
return version_id in self._cache
1867
def get_position(self, version_id):
1868
"""Return details needed to access the version.
1870
.kndx indices do not support split-out data, so return None for the
1873
:return: a tuple (None, data position, size) to hand to the access
1874
logic to get the record.
1876
entry = self._cache[version_id]
1877
return None, entry[2], entry[3]
1879
def get_method(self, version_id):
1880
"""Return compression method of specified version."""
1882
options = self._cache[version_id][1]
1884
raise RevisionNotPresent(version_id, self._filename)
1885
if 'fulltext' in options:
1888
if 'line-delta' not in options:
1889
raise errors.KnitIndexUnknownMethod(self._full_path(), options)
1892
def get_options(self, version_id):
1893
"""Return a list representing options.
1897
return self._cache[version_id][1]
1899
def get_parent_map(self, version_ids):
1900
"""Passed through to by KnitVersionedFile.get_parent_map."""
1902
for version_id in version_ids:
2810
needed_by = self.refs[referenced]
1904
result[version_id] = tuple(self._cache[version_id][4])
2811
1905
except KeyError:
2812
needed_by = self.refs[referenced] = set()
2814
# Discard references satisfied by the new key
2817
def get_new_keys(self):
2818
return self.new_keys
2820
def get_unsatisfied_refs(self):
2821
return self.refs.iterkeys()
2823
def _satisfy_refs_for_key(self, key):
2827
# No keys depended on this key. That's ok.
2830
def add_key(self, key):
2831
# satisfy refs for key, and remember that we've seen this key.
2832
self._satisfy_refs_for_key(key)
2833
if self.new_keys is not None:
2834
self.new_keys.add(key)
2836
def satisfy_refs_for_keys(self, keys):
2838
self._satisfy_refs_for_key(key)
2840
def get_referrers(self):
2842
for referrers in self.refs.itervalues():
2843
result.update(referrers)
2847
class _KnitGraphIndex(object):
2848
"""A KnitVersionedFiles index layered on GraphIndex."""
2850
def __init__(self, graph_index, is_locked, deltas=False, parents=True,
2851
add_callback=None, track_external_parent_refs=False):
1909
def get_parents_with_ghosts(self, version_id):
1910
"""Return parents of specified version with ghosts."""
1912
return self.get_parent_map([version_id])[version_id]
1914
raise RevisionNotPresent(version_id, self)
1916
def check_versions_present(self, version_ids):
1917
"""Check that all specified versions are present."""
1919
for version_id in version_ids:
1920
if version_id not in cache:
1921
raise RevisionNotPresent(version_id, self._filename)
1924
class KnitGraphIndex(object):
1925
"""A knit index that builds on GraphIndex."""
1927
def __init__(self, graph_index, deltas=False, parents=True, add_callback=None):
2852
1928
"""Construct a KnitGraphIndex on a graph_index.
2854
1930
:param graph_index: An implementation of bzrlib.index.GraphIndex.
2855
:param is_locked: A callback to check whether the object should answer
2857
1931
:param deltas: Allow delta-compressed records.
2858
:param parents: If True, record knits parents, if not do not record
2860
1932
:param add_callback: If not None, allow additions to the index and call
2861
1933
this callback with a list of added GraphIndex nodes:
2862
1934
[(node, value, node_refs), ...]
2863
:param is_locked: A callback, returns True if the index is locked and
2865
:param track_external_parent_refs: If True, record all external parent
2866
references parents from added records. These can be retrieved
2867
later by calling get_missing_parents().
1935
:param parents: If True, record knits parents, if not do not record
2869
self._add_callback = add_callback
2870
1938
self._graph_index = graph_index
2871
1939
self._deltas = deltas
1940
self._add_callback = add_callback
2872
1941
self._parents = parents
2873
1942
if deltas and not parents:
2874
# XXX: TODO: Delta tree and parent graph should be conceptually
2876
1943
raise KnitCorrupt(self, "Cannot do delta compression without "
2877
1944
"parent tracking.")
2878
self.has_graph = parents
2879
self._is_locked = is_locked
2880
self._missing_compression_parents = set()
2881
if track_external_parent_refs:
2882
self._key_dependencies = _KeyRefs()
2884
self._key_dependencies = None
2887
return "%s(%r)" % (self.__class__.__name__, self._graph_index)
2889
def add_records(self, records, random_id=False,
2890
missing_compression_parents=False):
2891
"""Add multiple records to the index.
1946
def _check_write_ok(self):
1949
def _get_entries(self, keys, check_present=False):
1950
"""Get the entries for keys.
1952
:param keys: An iterable of index keys, - 1-tuples.
1957
for node in self._graph_index.iter_entries(keys):
1959
found_keys.add(node[1])
1961
# adapt parentless index to the rest of the code.
1962
for node in self._graph_index.iter_entries(keys):
1963
yield node[0], node[1], node[2], ()
1964
found_keys.add(node[1])
1966
missing_keys = keys.difference(found_keys)
1968
raise RevisionNotPresent(missing_keys.pop(), self)
1970
def _present_keys(self, version_ids):
1972
node[1] for node in self._get_entries(version_ids)])
1974
def _parentless_ancestry(self, versions):
1975
"""Honour the get_ancestry API for parentless knit indices."""
1976
wanted_keys = self._version_ids_to_keys(versions)
1977
present_keys = self._present_keys(wanted_keys)
1978
missing = set(wanted_keys).difference(present_keys)
1980
raise RevisionNotPresent(missing.pop(), self)
1981
return list(self._keys_to_version_ids(present_keys))
1983
def get_ancestry(self, versions, topo_sorted=True):
1984
"""See VersionedFile.get_ancestry."""
1985
if not self._parents:
1986
return self._parentless_ancestry(versions)
1987
# XXX: This will do len(history) index calls - perhaps
1988
# it should be altered to be a index core feature?
1989
# get a graph of all the mentioned versions:
1992
versions = self._version_ids_to_keys(versions)
1993
pending = set(versions)
1995
# get all pending nodes
1996
this_iteration = pending
1997
new_nodes = self._get_entries(this_iteration)
2000
for (index, key, value, node_refs) in new_nodes:
2001
# don't ask for ghosts - otherwise
# we can end up looping with pending
2003
# being entirely ghosted.
2004
graph[key] = [parent for parent in node_refs[0]
2005
if parent not in ghosts]
2007
for parent in graph[key]:
2008
# don't examine known nodes again
2013
ghosts.update(this_iteration.difference(found))
2014
if versions.difference(graph):
2015
raise RevisionNotPresent(versions.difference(graph).pop(), self)
2017
result_keys = topo_sort(graph.items())
2019
result_keys = graph.iterkeys()
2020
return [key[0] for key in result_keys]
2022
def get_ancestry_with_ghosts(self, versions):
2023
"""See VersionedFile.get_ancestry."""
2024
if not self._parents:
2025
return self._parentless_ancestry(versions)
2026
# XXX: This will do len(history) index calls - perhaps
2027
# it should be altered to be a index core feature?
2028
# get a graph of all the mentioned versions:
2030
versions = self._version_ids_to_keys(versions)
2031
pending = set(versions)
2033
# get all pending nodes
2034
this_iteration = pending
2035
new_nodes = self._get_entries(this_iteration)
2037
for (index, key, value, node_refs) in new_nodes:
2038
graph[key] = node_refs[0]
2040
for parent in graph[key]:
2041
# don't examine known nodes again
2045
missing_versions = this_iteration.difference(graph)
2046
missing_needed = versions.intersection(missing_versions)
2048
raise RevisionNotPresent(missing_needed.pop(), self)
2049
for missing_version in missing_versions:
2050
# add a key, no parents
2051
graph[missing_version] = []
2052
pending.discard(missing_version) # don't look for it
2053
result_keys = topo_sort(graph.items())
2054
return [key[0] for key in result_keys]
2056
def get_build_details(self, version_ids):
2057
"""Get the method, index_memo and compression parent for version_ids.
2059
Ghosts are omitted from the result.
2061
:param version_ids: An iterable of version_ids.
2062
:return: A dict of version_id:(index_memo, compression_parent,
2063
parents, record_details).
2065
opaque structure to pass to read_records to extract the raw
2068
Content that this record is built upon, may be None
2070
Logical parents of this node
2072
extra information about the content which needs to be passed to
2073
Factory.parse_record
2076
entries = self._get_entries(self._version_ids_to_keys(version_ids), True)
2077
for entry in entries:
2078
version_id = self._keys_to_version_ids((entry[1],))[0]
2079
if not self._parents:
2082
parents = self._keys_to_version_ids(entry[3][0])
2083
if not self._deltas:
2084
compression_parent = None
2086
compression_parent_key = self._compression_parent(entry)
2087
if compression_parent_key:
2088
compression_parent = self._keys_to_version_ids(
2089
(compression_parent_key,))[0]
2091
compression_parent = None
2092
noeol = (entry[2][0] == 'N')
2093
if compression_parent:
2094
method = 'line-delta'
2097
result[version_id] = (self._node_to_position(entry),
2098
compression_parent, parents,
2102
def _compression_parent(self, an_entry):
2103
# return the key that an_entry is compressed against, or None
2104
# Grab the second parent list (as deltas implies parents currently)
2105
compression_parents = an_entry[3][1]
2106
if not compression_parents:
2108
assert len(compression_parents) == 1
2109
return compression_parents[0]
2111
def _get_method(self, node):
2112
if not self._deltas:
2114
if self._compression_parent(node):
2119
def num_versions(self):
2120
return len(list(self._graph_index.iter_all_entries()))
2122
__len__ = num_versions
2124
def get_versions(self):
2125
"""Get all the versions in the file. not topologically sorted."""
2126
return [node[1][0] for node in self._graph_index.iter_all_entries()]
2128
def has_version(self, version_id):
2129
"""True if the version is in the index."""
2130
return len(self._present_keys(self._version_ids_to_keys([version_id]))) == 1
2132
def _keys_to_version_ids(self, keys):
2133
return tuple(key[0] for key in keys)
2135
def get_position(self, version_id):
2136
"""Return details needed to access the version.
2138
:return: a tuple (index, data position, size) to hand to the access
2139
logic to get the record.
2141
node = self._get_node(version_id)
2142
return self._node_to_position(node)
2144
def _node_to_position(self, node):
2145
"""Convert an index value to position details."""
2146
bits = node[2][1:].split(' ')
2147
return node[0], int(bits[0]), int(bits[1])
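# Sketch (assumption): the GraphIndex value string decoded above looks like
# '<flag><pos> <size>', where the first byte is 'N' for no-eol records and a
# space otherwise; the rest is the byte offset and length within the pack.
def _sketch_parse_node_value(value):
    noeol = value[0] == 'N'
    pos, size = value[1:].split(' ')
    return noeol, int(pos), int(size)

# e.g. _sketch_parse_node_value('N0 1423') -> (True, 0, 1423)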
2149
def get_method(self, version_id):
2150
"""Return compression method of specified version."""
2151
return self._get_method(self._get_node(version_id))
2153
def _get_node(self, version_id):
2155
return list(self._get_entries(self._version_ids_to_keys([version_id])))[0]
2157
raise RevisionNotPresent(version_id, self)
2159
def get_options(self, version_id):
2160
"""Return a list representing options.
2164
node = self._get_node(version_id)
2165
options = [self._get_method(node)]
2166
if node[2][0] == 'N':
2167
options.append('no-eol')
2170
def get_parent_map(self, version_ids):
2171
"""Passed through to by KnitVersionedFile.get_parent_map."""
2172
nodes = self._get_entries(self._version_ids_to_keys(version_ids))
2176
result[node[1][0]] = self._keys_to_version_ids(node[3][0])
2179
result[node[1][0]] = ()
2182
def get_parents_with_ghosts(self, version_id):
2183
"""Return parents of specified version with ghosts."""
2185
return self.get_parent_map([version_id])[version_id]
2187
raise RevisionNotPresent(version_id, self)
2189
def check_versions_present(self, version_ids):
2190
"""Check that all specified versions are present."""
2191
keys = self._version_ids_to_keys(version_ids)
2192
present = self._present_keys(keys)
2193
missing = keys.difference(present)
2195
raise RevisionNotPresent(missing.pop(), self)
2197
def add_version(self, version_id, options, access_memo, parents):
2198
"""Add a version record to the index."""
2199
return self.add_versions(((version_id, options, access_memo, parents),))
2201
def add_versions(self, versions, random_id=False):
2202
"""Add multiple versions to the index.
2893
2204
This function does not insert data into the Immutable GraphIndex
2894
2205
backing the KnitGraphIndex, instead it prepares data for insertion by
2895
2206
the caller and checks that it is safe to insert then calls
2896
2207
self._add_callback with the prepared GraphIndex nodes.
2898
:param records: a list of tuples:
2899
(key, options, access_memo, parents).
2209
:param versions: a list of tuples:
2210
(version_id, options, pos, size, parents).
2900
2211
:param random_id: If True the ids being added were randomly generated
2901
2212
and no check for existence will be performed.
2902
:param missing_compression_parents: If True the records being added are
2903
only compressed against texts already in the index (or inside
2904
records). If False the records all refer to unavailable texts (or
2905
texts inside records) as compression parents.
2907
2214
if not self._add_callback:
2908
2215
raise errors.ReadOnlyError(self)
2909
2216
# we hope there are no repositories with inconsistent parentage
2913
compression_parents = set()
2914
key_dependencies = self._key_dependencies
2915
for (key, options, access_memo, parents) in records:
2917
parents = tuple(parents)
2918
if key_dependencies is not None:
2919
key_dependencies.add_references(key, parents)
2221
for (version_id, options, access_memo, parents) in versions:
2920
2222
index, pos, size = access_memo
2223
key = (version_id, )
2224
parents = tuple((parent, ) for parent in parents)
2921
2225
if 'no-eol' in options:
2964
2259
for key, (value, node_refs) in keys.iteritems():
2965
2260
result.append((key, value))
2966
2261
self._add_callback(result)
2967
if missing_compression_parents:
2968
# This may appear to be incorrect (it does not check for
2969
# compression parents that are in the existing graph index),
2970
# but such records won't have been buffered, so this is
2971
# actually correct: every entry when
2972
# missing_compression_parents==True either has a missing parent, or
2973
# a parent that is one of the keys in records.
2974
compression_parents.difference_update(keys)
2975
self._missing_compression_parents.update(compression_parents)
2976
# Adding records may have satisfied missing compression parents.
2977
self._missing_compression_parents.difference_update(keys)
2979
def scan_unvalidated_index(self, graph_index):
2980
"""Inform this _KnitGraphIndex that there is an unvalidated index.
2982
This allows this _KnitGraphIndex to keep track of any missing
2983
compression parents we may want to have filled in to make those
2986
:param graph_index: A GraphIndex
2989
new_missing = graph_index.external_references(ref_list_num=1)
2990
new_missing.difference_update(self.get_parent_map(new_missing))
2991
self._missing_compression_parents.update(new_missing)
2992
if self._key_dependencies is not None:
2993
# Add parent refs from graph_index (and discard parent refs that
2994
# the graph_index has).
2995
for node in graph_index.iter_all_entries():
2996
self._key_dependencies.add_references(node[1], node[3][0])
2998
def get_missing_compression_parents(self):
2999
"""Return the keys of missing compression parents.
3001
Missing compression parents occur when a record stream was missing
3002
basis texts, or an index was scanned that had missing basis texts.
3004
return frozenset(self._missing_compression_parents)
3006
def get_missing_parents(self):
3007
"""Return the keys of missing parents."""
3008
# If updating this, you should also update
3009
# groupcompress._GCGraphIndex.get_missing_parents
3010
# We may have false positives, so filter those out.
3011
self._key_dependencies.satisfy_refs_for_keys(
3012
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
3013
return frozenset(self._key_dependencies.get_unsatisfied_refs())
3015
def _check_read(self):
3016
"""raise if reads are not permitted."""
3017
if not self._is_locked():
3018
raise errors.ObjectNotLocked(self)
3020
def _check_write_ok(self):
3021
"""Assert if writes are not permitted."""
3022
if not self._is_locked():
3023
raise errors.ObjectNotLocked(self)
3025
def _compression_parent(self, an_entry):
3026
# return the key that an_entry is compressed against, or None
3027
# Grab the second parent list (as deltas implies parents currently)
3028
compression_parents = an_entry[3][1]
3029
if not compression_parents:
3031
if len(compression_parents) != 1:
3032
raise AssertionError(
3033
"Too many compression parents: %r" % compression_parents)
3034
return compression_parents[0]
3036
def get_build_details(self, keys):
3037
"""Get the method, index_memo and compression parent for version_ids.
3039
Ghosts are omitted from the result.
3041
:param keys: An iterable of keys.
3042
:return: A dict of key:
3043
(index_memo, compression_parent, parents, record_details).
3045
opaque structure to pass to read_records to extract the raw
3048
Content that this record is built upon, may be None
3050
Logical parents of this node
3052
extra information about the content which needs to be passed to
3053
Factory.parse_record
3057
entries = self._get_entries(keys, False)
3058
for entry in entries:
3060
if not self._parents:
3063
parents = entry[3][0]
3064
if not self._deltas:
3065
compression_parent_key = None
3067
compression_parent_key = self._compression_parent(entry)
3068
noeol = (entry[2][0] == 'N')
3069
if compression_parent_key:
3070
method = 'line-delta'
3073
result[key] = (self._node_to_position(entry),
3074
compression_parent_key, parents,
3078

    def _get_entries(self, keys, check_present=False):
        """Get the entries for keys.

        :param keys: An iterable of index key tuples.
        """
        keys = set(keys)
        found_keys = set()
        if self._parents:
            for node in self._graph_index.iter_entries(keys):
                yield node
                found_keys.add(node[1])
        else:
            # adapt parentless index to the rest of the code.
            for node in self._graph_index.iter_entries(keys):
                yield node[0], node[1], node[2], ()
                found_keys.add(node[1])
        if check_present:
            missing_keys = keys.difference(found_keys)
            if missing_keys:
                raise RevisionNotPresent(missing_keys.pop(), self)

    def get_method(self, key):
        """Return compression method of specified key."""
        return self._get_method(self._get_node(key))

    def _get_method(self, node):
        if not self._deltas:
            return 'fulltext'
        if self._compression_parent(node):
            return 'line-delta'
        else:
            return 'fulltext'

    def _get_node(self, key):
        try:
            return list(self._get_entries([key]))[0]
        except IndexError:
            raise RevisionNotPresent(key, self)

    def get_options(self, key):
        """Return a list representing options.

        e.g. ['foo', 'bar']
        """
        node = self._get_node(key)
        options = [self._get_method(node)]
        if node[2][0] == 'N':
            options.append('no-eol')
        return options

    def find_ancestry(self, keys):
        """See CombinedGraphIndex.find_ancestry()"""
        return self._graph_index.find_ancestry(keys, 0)

    def get_parent_map(self, keys):
        """Get a map of the parents of keys.

        :param keys: The keys to look up parents for.
        :return: A mapping from keys to parents. Absent keys are absent from
            the mapping.
        """
        self._check_read()
        nodes = self._get_entries(keys)
        result = {}
        if self._parents:
            for node in nodes:
                result[node[1]] = node[3][0]
        else:
            for node in nodes:
                result[node[1]] = None
        return result

    def get_position(self, key):
        """Return details needed to access the version.

        :return: a tuple (index, data position, size) to hand to the access
            logic to get the record.
        """
        node = self._get_node(key)
        return self._node_to_position(node)

    has_key = _mod_index._has_key_from_parent_map

    def keys(self):
        """Get all the keys in the collection.

        The keys are not ordered.

        :return: An iterable of keys.
        """
        self._check_read()
        return [node[1] for node in self._graph_index.iter_all_entries()]

    missing_keys = _mod_index._missing_keys_from_parent_map

    def _node_to_position(self, node):
        """Convert an index value to position details."""
        bits = node[2][1:].split(' ')
        return node[0], int(bits[0]), int(bits[1])
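
    # Illustrative sketch (not part of bzrlib): a node value is the eol flag
    # character followed by "<offset> <length>", so parsing a hypothetical
    # value mirrors the method above:
    #
    #   value = ' 1024 211'
    #   bits = value[1:].split(' ')
    #   (int(bits[0]), int(bits[1]))   # -> (1024, 211)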

    def _sort_keys_by_io(self, keys, positions):
        """Figure out an optimal order to read the records for the given keys.

        Sort keys, grouped by index and sorted by position.

        :param keys: A list of keys whose records we want to read. This will be
            sorted 'in-place'.
        :param positions: A dict, such as the one returned by
            _get_components_positions()
        :return: None
        """
        def get_index_memo(key):
            # index_memo is at offset [1]. It is made up of (GraphIndex,
            # position, size). GI is an object, which will be unique for each
            # pack file. This causes us to group by pack file, then sort by
            # position. Size doesn't matter, but it isn't worth breaking up the
            # tuple.
            return positions[key][1]
        return keys.sort(key=get_index_memo)

    _get_total_build_size = _get_total_build_size
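

def _example_sort_keys_by_io():
    """Illustrative sketch, not part of the bzrlib API.

    Demonstrates the ordering produced by _sort_keys_by_io above: keys are
    grouped by the index object in their index_memo and ordered by byte
    offset, so later reads are sequential within each pack file. The memos
    below are hypothetical; strings stand in for GraphIndex objects.
    """
    positions = {
        ('k1',): (None, ('pack-b', 300, 10), None),
        ('k2',): (None, ('pack-a', 100, 10), None),
        ('k3',): (None, ('pack-a', 900, 10), None),
    }
    keys = [('k1',), ('k3',), ('k2',)]
    keys.sort(key=lambda key: positions[key][1])
    # keys is now [('k2',), ('k3',), ('k1',)]
    return keys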


class _KnitKeyAccess(object):
    """Access to records in .knit files."""

    def __init__(self, transport, mapper):
        """Create a _KnitKeyAccess with transport and mapper.

        :param transport: The transport the access object is rooted at.
        :param mapper: The mapper used to map keys to .knit files.
        """
        self._transport = transport
        self._mapper = mapper

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _KnitKeyAccess the memo is (key, pos,
            length), where the key is the record key.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        # TODO: This can be tuned for writing to sftp and other servers where
        # append() is relatively expensive by grouping the writes to each key
        # prefix.
        for key, size in key_sizes:
            path = self._mapper.map(key)
            try:
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            except errors.NoSuchFile:
                self._transport.mkdir(osutils.dirname(path))
                base = self._transport.append_bytes(path + '.knit',
                    raw_data[offset:offset+size])
            offset += size
            result.append((key, base, size))
        return result
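
    # Illustrative usage sketch (not part of bzrlib); the transport, mapper
    # and payloads are hypothetical, and the offsets shown assume the target
    # .knit file starts out empty:
    #
    #   access = _KnitKeyAccess(transport, mapper)
    #   raw = record_bytes_1 + record_bytes_2
    #   memos = access.add_raw_records(
    #       [(('file-id', 'rev-1'), len(record_bytes_1)),
    #        (('file-id', 'rev-2'), len(record_bytes_2))], raw)
    #   # memos -> [(('file-id', 'rev-1'), 0, len(record_bytes_1)),
    #   #           (('file-id', 'rev-2'), len(record_bytes_1),
    #   #            len(record_bytes_2))]
    #
    # The memos are later handed back to get_raw_records() to read the bytes.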
"""Flush pending writes on this access object.
3250
For .knit files this is a no-op.
2313
"""IFF this data access has its own storage area, initialise it.
2317
self._transport.put_bytes_non_atomic(self._filename, '',
2318
mode=self._file_mode)
2320
def open_file(self):
2321
"""IFF this data access can be represented as a single file, open it.
2323
For knits that are not mapped to a single file on disk this will
2326
:return: None or a file handle.
2329
return self._transport.get(self._filename)
3254
2334

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the records.

        :param memos_for_retrieval: An iterable containing the access memo for
            retrieving the bytes.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index request to minimise readv's issued.
        request_lists = []
        current_prefix = None
        for (key, offset, length) in memos_for_retrieval:
            if current_prefix == key[:-1]:
                current_list.append((offset, length))
            else:
                if current_prefix is not None:
                    request_lists.append((current_prefix, current_list))
                current_prefix = key[:-1]
                current_list = [(offset, length)]
        # handle the last entry
        if current_prefix is not None:
            request_lists.append((current_prefix, current_list))
        for prefix, read_vector in request_lists:
            path = self._mapper.map(prefix) + '.knit'
            for pos, data in self._transport.readv(path, read_vector):
                yield data
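

def _example_group_memos_by_prefix(memos):
    """Illustrative sketch, not part of the bzrlib API.

    Shows the first pass of _KnitKeyAccess.get_raw_records above in
    isolation: runs of memos that share a key prefix are folded into a single
    readv request list, so each .knit file is read with one readv call per
    run of memos.

    :param memos: An iterable of hypothetical (key, offset, length) memos.
    :return: A list of (prefix, [(offset, length), ...]) request lists.
    """
    request_lists = []
    current_prefix = None
    current_list = []
    for key, offset, length in memos:
        if current_prefix == key[:-1]:
            current_list.append((offset, length))
        else:
            if current_prefix is not None:
                request_lists.append((current_prefix, current_list))
            current_prefix = key[:-1]
            current_list = [(offset, length)]
    if current_prefix is not None:
        request_lists.append((current_prefix, current_list))
    return request_lists

# For example:
#   _example_group_memos_by_prefix(
#       [(('f', 'r1'), 0, 10), (('f', 'r2'), 10, 20), (('g', 'r1'), 0, 5)])
#   -> [(('f',), [(0, 10), (10, 20)]), (('g',), [(0, 5)])]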


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            bzrlib.repo_fmt.pack_repo.AggregateIndex for more details.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size of
            each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if type(raw_data) is not str:
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset+size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
"""Flush pending writes on this access object.
2391
"""Pack based knits do not get individually created."""
3328
This will flush any buffered writes to a NewPack.
3330
if self._flush_func is not None:
3333
2393

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for the records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an index
                # reload, and this index has gone missing, we need to start
                # over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that can
                    # be done, raise
                    raise
                raise errors.RetryWithNewPacks(index,
                                               reload_occurred=True,
                                               exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                                               reload_occurred=False,
                                               exc_info=sys.exc_info())

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common logic
        of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is a
                # hard error.
                is_error = True
        if is_error:
            exc_class, exc_value, exc_traceback = retry_exc.exc_info
            raise exc_class, exc_value, exc_traceback
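

def _example_retry_with_reload(access, do_read):
    """Illustrative sketch, not part of the bzrlib API.

    Shows the calling convention documented by reload_or_raise above: the
    caller loops, and on RetryWithNewPacks either the access object reloads
    its pack map (and the read is retried) or the original exception is
    re-raised. `access` is a _DirectPackAccess-like object and `do_read` a
    hypothetical callable performing the read.
    """
    while True:
        try:
            return do_read()
        except errors.RetryWithNewPacks, e:
            access.reload_or_raise(e)
            # reload_or_raise returned, so the pack map changed; retry.

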
class _StreamAccess(object):
2435
"""A Knit Access object that provides data from a datastream.
2437
It also provides a fallback to present as unannotated data, annotated data
2438
from a *backing* access object.
2440
This is triggered by a index_memo which is pointing to a different index
2441
than this was constructed with, and is used to allow extracting full
2442
unannotated texts for insertion into annotated knits.
2445
def __init__(self, reader_callable, stream_index, backing_knit,
2447
"""Create a _StreamAccess object.
2449
:param reader_callable: The reader_callable from the datastream.
2450
This is called to buffer all the data immediately, for
2452
:param stream_index: The index the data stream this provides access to
2453
which will be present in native index_memo's.
2454
:param backing_knit: The knit object that will provide access to
2455
annotated texts which are not available in the stream, so as to
2456
create unannotated texts.
2457
:param orig_factory: The original content factory used to generate the
2458
stream. This is used for checking whether the thunk code for
2459
supporting _copy_texts will generate the correct form of data.
2461
self.data = reader_callable(None)
2462
self.stream_index = stream_index
2463
self.backing_knit = backing_knit
2464
self.orig_factory = orig_factory
2466
def get_raw_records(self, memos_for_retrieval):
2467
"""Get the raw bytes for a records.
2469
:param memos_for_retrieval: An iterable of memos from the
2470
_StreamIndex object identifying bytes to read; for these classes
2471
they are (from_backing_knit, index, start, end) and can point to
2472
either the backing knit or streamed data.
2473
:return: An iterator yielding a byte string for each record in
2474
memos_for_retrieval.
2476
# use a generator for memory friendliness
2477
for from_backing_knit, version_id, start, end in memos_for_retrieval:
2478
if not from_backing_knit:
2479
assert version_id is self.stream_index
2480
yield self.data[start:end]
2482
# we have been asked to thunk. This thunking only occurs when
2483
# we are obtaining plain texts from an annotated backing knit
2484
# so that _copy_texts will work.
2485
# We could improve performance here by scanning for where we need
2486
# to do this and using get_line_list, then interleaving the output
2487
# as desired. However, for now, this is sufficient.
2488
if self.orig_factory.__class__ != KnitPlainFactory:
2489
raise errors.KnitCorrupt(
2490
self, 'Bad thunk request %r cannot be backed by %r' %
2491
(version_id, self.orig_factory))
2492
lines = self.backing_knit.get_lines(version_id)
2493
line_bytes = ''.join(lines)
2494
digest = sha_string(line_bytes)
2495
# the packed form of the fulltext always has a trailing newline,
2496
# even if the actual text does not, unless the file is empty. the
2497
# record options including the noeol flag are passed through by
2498
# _StreamIndex, so this is safe.
2500
if lines[-1][-1] != '\n':
2501
lines[-1] = lines[-1] + '\n'
2503
# We want plain data, because we expect to thunk only to allow text
2505
size, bytes = self.backing_knit._data._record_to_data(version_id,
2506
digest, lines, line_bytes)
2510
class _StreamIndex(object):
2511
"""A Knit Index object that uses the data map from a datastream."""
2513
def __init__(self, data_list, backing_index):
2514
"""Create a _StreamIndex object.
2516
:param data_list: The data_list from the datastream.
2517
:param backing_index: The index which will supply values for nodes
2518
referenced outside of this stream.
2520
self.data_list = data_list
2521
self.backing_index = backing_index
2522
self._by_version = {}
2524
for key, options, length, parents in data_list:
2525
self._by_version[key] = options, (pos, pos + length), parents
2528
def get_ancestry(self, versions, topo_sorted):
2529
"""Get an ancestry list for versions."""
2531
# Not needed for basic joins
2532
raise NotImplementedError(self.get_ancestry)
2533
# get a graph of all the mentioned versions:
2534
# Little ugly - basically copied from KnitIndex, but don't want to
2535
# accidentally incorporate too much of that index's code.
2537
pending = set(versions)
2538
cache = self._by_version
2540
version = pending.pop()
2543
parents = [p for p in cache[version][2] if p in cache]
2545
raise RevisionNotPresent(version, self)
2546
# if not completed and not a ghost
2547
pending.update([p for p in parents if p not in ancestry])
2548
ancestry.add(version)
2549
return list(ancestry)
2551
def get_build_details(self, version_ids):
2552
"""Get the method, index_memo and compression parent for version_ids.
2554
Ghosts are omitted from the result.
2556
:param version_ids: An iterable of version_ids.
2557
:return: A dict of version_id:(index_memo, compression_parent,
2558
parents, record_details).
2560
opaque memo that can be passed to _StreamAccess.read_records
2561
to extract the raw data; for these classes it is
2562
(from_backing_knit, index, start, end)
2564
Content that this record is built upon, may be None
2566
Logical parents of this node
2568
extra information about the content which needs to be passed to
2569
Factory.parse_record
2572
for version_id in version_ids:
2574
method = self.get_method(version_id)
2575
except errors.RevisionNotPresent:
2576
# ghosts are omitted
2578
parent_ids = self.get_parents_with_ghosts(version_id)
2579
noeol = ('no-eol' in self.get_options(version_id))
2580
index_memo = self.get_position(version_id)
2581
from_backing_knit = index_memo[0]
2582
if from_backing_knit:
2583
# texts retrieved from the backing knit are always full texts
2585
if method == 'fulltext':
2586
compression_parent = None
2588
compression_parent = parent_ids[0]
2589
result[version_id] = (index_memo, compression_parent,
2590
parent_ids, (method, noeol))
2593
def get_method(self, version_id):
2594
"""Return compression method of specified version."""
2595
options = self.get_options(version_id)
2596
if 'fulltext' in options:
2598
elif 'line-delta' in options:
2601
raise errors.KnitIndexUnknownMethod(self, options)
2603
def get_options(self, version_id):
2604
"""Return a list representing options.
2609
return self._by_version[version_id][0]
2611
options = list(self.backing_index.get_options(version_id))
2612
if 'fulltext' in options:
2614
elif 'line-delta' in options:
2615
# Texts from the backing knit are always returned from the stream
2617
options.remove('line-delta')
2618
options.append('fulltext')
2620
raise errors.KnitIndexUnknownMethod(self, options)
2621
return tuple(options)
2623
def get_parent_map(self, version_ids):
2624
"""Passed through to by KnitVersionedFile.get_parent_map."""
2627
for version_id in version_ids:
2629
result[version_id] = self._by_version[version_id][2]
2631
pending_ids.add(version_id)
2632
result.update(self.backing_index.get_parent_map(pending_ids))
2635
def get_parents_with_ghosts(self, version_id):
2636
"""Return parents of specified version with ghosts."""
2638
return self.get_parent_map([version_id])[version_id]
2640
raise RevisionNotPresent(version_id, self)
2642
def get_position(self, version_id):
2643
"""Return details needed to access the version.
2645
_StreamAccess has the data as a big array, so we return slice
2646
coordinates into that (as index_memo's are opaque outside the
2647
index and matching access class).
2649
:return: a tuple (from_backing_knit, index, start, end) that can
2650
be passed e.g. to get_raw_records.
2651
If from_backing_knit is False, index will be self, otherwise it
2652
will be a version id.
2655
start, end = self._by_version[version_id][1]
2656
return False, self, start, end
2658
# Signal to the access object to handle this from the backing knit.
2659
return (True, version_id, None, None)
2661
def get_versions(self):
2662
"""Get all the versions in the stream."""
2663
return self._by_version.keys()
2666

class _KnitData(object):
    """Manage extraction of data from a KnitAccess, caching and decompressing.

    The KnitData class provides the logic for parsing and using knit records,
    making use of an access method for the low level read and write operations.
    """

    def __init__(self, access):
        """Create a KnitData object.

        :param access: The access method to use. Access methods such as
            _KnitAccess manage the insertion of raw records and the subsequent
            retrieval of the same.
        """
        self._access = access
        self._checked = False

    def _open_file(self):
        return self._access.open_file()

    def _record_to_data(self, version_id, digest, lines, dense_lines=None):
        """Convert version_id, digest, lines into a raw data block.

        :param dense_lines: The bytes of lines but in a denser form. For
            instance, if lines is a list of 1000 bytestrings each ending in \n,
            dense_lines may be a list with one line in it, containing all the
            1000's lines and their \n's. Using dense_lines if it is already
            known is a win because the string join to create bytes in this
            function spends less time resizing the final string.
        :return: (len, a bytestring containing the compressed record data).
        """
        # Note: using a string copy here increases memory pressure with e.g.
        # ISO's, but it is about 3 seconds faster on a 1.2Ghz intel machine
        # when doing the initial commit of a mozilla tree. RBC 20070921
        bytes = ''.join(chain(
            ["version %s %d %s\n" % (version_id,
                                     len(lines),
                                     digest)],
            dense_lines or lines,
            ["end %s\n" % version_id]))
        assert bytes.__class__ == str
        compressed_bytes = bytes_to_gzip(bytes)
        return len(compressed_bytes), compressed_bytes
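
    # Illustrative sketch (not part of bzrlib): before gzip compression the
    # record produced above is plain text of the form (values hypothetical):
    #
    #   version rev-1 2 <sha1-hexdigest>\n
    #   first line\n
    #   second line\n
    #   end rev-1\n
    #
    # i.e. a header naming the version, line count and content digest, the
    # (possibly delta-encoded) lines, and a trailing end marker which
    # _parse_record_unchecked() verifies when reading the record back.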
def add_raw_records(self, sizes, raw_data):
2711
"""Append a prepared record to the data file.
2713
:param sizes: An iterable containing the size of each raw data segment.
2714
:param raw_data: A bytestring containing the data.
2715
:return: a list of index data for the way the data was stored.
2716
See the access method add_raw_records documentation for more
2719
return self._access.add_raw_records(sizes, raw_data)
2721
def _parse_record_header(self, version_id, raw_data):
2722
"""Parse a record header for consistency.
2724
:return: the header and the decompressor stream.
2725
as (stream, header_record)
2727
df = GzipFile(mode='rb', fileobj=StringIO(raw_data))
2729
rec = self._check_header(version_id, df.readline())
2730
except Exception, e:
2731
raise KnitCorrupt(self._access,
2732
"While reading {%s} got %s(%s)"
2733
% (version_id, e.__class__.__name__, str(e)))
2736
def _split_header(self, line):
2739
raise KnitCorrupt(self._access,
2740
'unexpected number of elements in record header')
2743
def _check_header_version(self, rec, version_id):
2744
if rec[1] != version_id:
2745
raise KnitCorrupt(self._access,
2746
'unexpected version, wanted %r, got %r'
2747
% (version_id, rec[1]))
2749
def _check_header(self, version_id, line):
2750
rec = self._split_header(line)
2751
self._check_header_version(rec, version_id)
2754

    def _parse_record_unchecked(self, data):
        # 4168 calls in 2880 217 internal
        # 4168 calls to _parse_record_header in 2121
        # 4168 calls to readlines in 330
        df = GzipFile(mode='rb', fileobj=StringIO(data))
        try:
            record_contents = df.readlines()
        except Exception, e:
            raise KnitCorrupt(self._access, "Corrupt compressed record %r, got %s(%s)" %
                (data, e.__class__.__name__, str(e)))
        header = record_contents.pop(0)
        rec = self._split_header(header)
        last_line = record_contents.pop()
        if len(record_contents) != int(rec[2]):
            raise KnitCorrupt(self._access,
                              'incorrect number of lines %s != %s'
                              ' for version {%s}'
                              % (len(record_contents), int(rec[2]),
                                 rec[1]))
        if last_line != 'end %s\n' % rec[1]:
            raise KnitCorrupt(self._access,
                              'unexpected version end line %r, wanted %r'
                              % (last_line, rec[1]))
        df.close()
        return rec, record_contents
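
    # Illustrative sketch (not part of bzrlib): decompressing the example
    # record shown above _record_to_data() and passing it through
    # _parse_record_unchecked() yields (values hypothetical):
    #
    #   rec             -> ['version', 'rev-1', '2', '<sha1-hexdigest>']
    #   record_contents -> ['first line\n', 'second line\n']
    #
    # i.e. the whitespace-split header fields plus the content lines, with
    # the 'end rev-1' trailer already checked and stripped.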

    def _parse_record(self, version_id, data):
        rec, record_contents = self._parse_record_unchecked(data)
        self._check_header_version(rec, version_id)
        return record_contents, rec[3]
def read_records_iter_raw(self, records):
2787
"""Read text records from data file and yield raw data.
2789
This unpacks enough of the text record to validate the id is
2790
as expected but thats all.
2792
Each item the iterator yields is (version_id, bytes,
2795
# setup an iterator of the external records:
2796
# uses readv so nice and fast we hope.
2798
# grab the disk data needed.
2799
needed_offsets = [index_memo for version_id, index_memo
2801
raw_records = self._access.get_raw_records(needed_offsets)
2803
for version_id, index_memo in records:
2804
data = raw_records.next()
2805
# validate the header
2806
df, rec = self._parse_record_header(version_id, data)
2808
yield version_id, data, rec[3]
2810
def read_records_iter(self, records):
2811
"""Read text records from data file and yield result.
2813
The result will be returned in whatever is the fastest to read.
2814
Not by the order requested. Also, multiple requests for the same
2815
record will only yield 1 response.
2816
:param records: A list of (version_id, pos, len) entries
2817
:return: Yields (version_id, contents, digest) in the order
2818
read, not the order requested
2823
needed_records = sorted(set(records), key=operator.itemgetter(1))
2824
if not needed_records:
2827
# The transport optimizes the fetching as well
2828
# (ie, reads continuous ranges.)
2829
raw_data = self._access.get_raw_records(
2830
[index_memo for version_id, index_memo in needed_records])
2832
for (version_id, index_memo), data in \
2833
izip(iter(needed_records), raw_data):
2834
content, digest = self._parse_record(version_id, data)
2835
yield version_id, content, digest
2837

    def read_records(self, records):
        """Read records into a dictionary."""
        components = {}
        for record_id, content, digest in \
                self.read_records_iter(records):
            components[record_id] = (content, digest)
        return components
class InterKnit(InterVersionedFile):
2847
"""Optimised code paths for knit to knit operations."""
2849
_matching_file_from_factory = staticmethod(make_file_knit)
2850
_matching_file_to_factory = staticmethod(make_file_knit)
2853
def is_compatible(source, target):
2854
"""Be compatible with knits. """
2856
return (isinstance(source, KnitVersionedFile) and
2857
isinstance(target, KnitVersionedFile))
2858
except AttributeError:
2861
def _copy_texts(self, pb, msg, version_ids, ignore_missing=False):
2862
"""Copy texts to the target by extracting and adding them one by one.
2864
see join() for the parameter definitions.
2866
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
2867
# --- the below is factorable out with VersionedFile.join, but wait for
2868
# VersionedFiles, it may all be simpler then.
2869
graph = Graph(self.source)
2870
search = graph._make_breadth_first_searcher(version_ids)
2871
transitive_ids = set()
2872
map(transitive_ids.update, list(search))
2873
parent_map = self.source.get_parent_map(transitive_ids)
2874
order = topo_sort(parent_map.items())
2876
def size_of_content(content):
2877
return sum(len(line) for line in content.text())
2878
# Cache at most 10MB of parent texts
2879
parent_cache = lru_cache.LRUSizeCache(max_size=10*1024*1024,
2880
compute_size=size_of_content)
2881
# TODO: jam 20071116 It would be nice to have a streaming interface to
2882
# get multiple texts from a source. The source could be smarter
2883
# about how it handled intermediate stages.
2884
# get_line_list() or make_mpdiffs() seem like a possibility, but
2885
# at the moment they extract all full texts into memory, which
2886
# causes us to store more than our 3x fulltext goal.
2887
# Repository.iter_files_bytes() may be another possibility
2888
to_process = [version for version in order
2889
if version not in self.target]
2890
total = len(to_process)
2891
pb = ui.ui_factory.nested_progress_bar()
2893
for index, version in enumerate(to_process):
2894
pb.update('Converting versioned data', index, total)
2895
sha1, num_bytes, parent_text = self.target.add_lines(version,
2896
self.source.get_parents_with_ghosts(version),
2897
self.source.get_lines(version),
2898
parent_texts=parent_cache)
2899
parent_cache[version] = parent_text
2904
def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
2905
"""See InterVersionedFile.join."""
2906
assert isinstance(self.source, KnitVersionedFile)
2907
assert isinstance(self.target, KnitVersionedFile)
2909
# If the source and target are mismatched w.r.t. annotations vs
2910
# plain, the data needs to be converted accordingly
2911
if self.source.factory.annotated == self.target.factory.annotated:
2913
elif self.source.factory.annotated:
2914
converter = self._anno_to_plain_converter
2916
# We're converting from a plain to an annotated knit. Copy them
2917
# across by full texts.
2918
return self._copy_texts(pb, msg, version_ids, ignore_missing)
2920
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
2924
pb = ui.ui_factory.nested_progress_bar()
2926
version_ids = list(version_ids)
2927
if None in version_ids:
2928
version_ids.remove(None)
2930
self.source_ancestry = set(self.source.get_ancestry(version_ids,
2932
this_versions = set(self.target._index.get_versions())
2933
# XXX: For efficiency we should not look at the whole index,
2934
# we only need to consider the referenced revisions - they
2935
# must all be present, or the method must be full-text.
2936
# TODO, RBC 20070919
2937
needed_versions = self.source_ancestry - this_versions
2939
if not needed_versions:
2941
full_list = topo_sort(
2942
self.source.get_parent_map(self.source.versions()))
2944
version_list = [i for i in full_list if (not self.target.has_version(i)
2945
and i in needed_versions)]
2949
copy_queue_records = []
2951
for version_id in version_list:
2952
options = self.source._index.get_options(version_id)
2953
parents = self.source._index.get_parents_with_ghosts(version_id)
2954
# check that its will be a consistent copy:
2955
for parent in parents:
2956
# if source has the parent, we must :
2957
# * already have it or
2958
# * have it scheduled already
2959
# otherwise we don't care
2960
assert (self.target.has_version(parent) or
2961
parent in copy_set or
2962
not self.source.has_version(parent))
2963
index_memo = self.source._index.get_position(version_id)
2964
copy_queue_records.append((version_id, index_memo))
2965
copy_queue.append((version_id, options, parents))
2966
copy_set.add(version_id)
2968
# data suck the join:
2970
total = len(version_list)
2973
for (version_id, raw_data, _), \
2974
(version_id2, options, parents) in \
2975
izip(self.source._data.read_records_iter_raw(copy_queue_records),
2977
assert version_id == version_id2, 'logic error, inconsistent results'
2979
pb.update("Joining knit", count, total)
2981
size, raw_data = converter(raw_data, version_id, options,
2984
size = len(raw_data)
2985
raw_records.append((version_id, options, parents, size))
2986
raw_datum.append(raw_data)
2987
self.target._add_raw_records(raw_records, ''.join(raw_datum))
2992
def _anno_to_plain_converter(self, raw_data, version_id, options,
2994
"""Convert annotated content to plain content."""
2995
data, digest = self.source._data._parse_record(version_id, raw_data)
2996
if 'fulltext' in options:
2997
content = self.source.factory.parse_fulltext(data, version_id)
2998
lines = self.target.factory.lower_fulltext(content)
3000
delta = self.source.factory.parse_line_delta(data, version_id,
3002
lines = self.target.factory.lower_line_delta(delta)
3003
return self.target._data._record_to_data(version_id, digest, lines)
3006
InterVersionedFile.register_optimiser(InterKnit)
3009
class WeaveToKnit(InterVersionedFile):
3010
"""Optimised code paths for weave to knit operations."""
3012
_matching_file_from_factory = bzrlib.weave.WeaveFile
3013
_matching_file_to_factory = staticmethod(make_file_knit)
3016
def is_compatible(source, target):
3017
"""Be compatible with weaves to knits."""
3019
return (isinstance(source, bzrlib.weave.Weave) and
3020
isinstance(target, KnitVersionedFile))
3021
except AttributeError:
3024
def join(self, pb=None, msg=None, version_ids=None, ignore_missing=False):
3025
"""See InterVersionedFile.join."""
3026
assert isinstance(self.source, bzrlib.weave.Weave)
3027
assert isinstance(self.target, KnitVersionedFile)
3029
version_ids = self._get_source_version_ids(version_ids, ignore_missing)
3034
pb = ui.ui_factory.nested_progress_bar()
3036
version_ids = list(version_ids)
3038
self.source_ancestry = set(self.source.get_ancestry(version_ids))
3039
this_versions = set(self.target._index.get_versions())
3040
needed_versions = self.source_ancestry - this_versions
3042
if not needed_versions:
3044
full_list = topo_sort(
3045
self.source.get_parent_map(self.source.versions()))
3047
version_list = [i for i in full_list if (not self.target.has_version(i)
3048
and i in needed_versions)]
3052
total = len(version_list)
3053
parent_map = self.source.get_parent_map(version_list)
3054
for version_id in version_list:
3055
pb.update("Converting to knit", count, total)
3056
parents = parent_map[version_id]
3057
# check that its will be a consistent copy:
3058
for parent in parents:
3059
# if source has the parent, we must already have it
3060
assert (self.target.has_version(parent))
3061
self.target.add_lines(
3062
version_id, parents, self.source.get_lines(version_id))
3069
InterVersionedFile.register_optimiser(WeaveToKnit)
3420
3072
# Deprecated, use PatienceSequenceMatcher instead
3465
3187
parents to create an annotation, but only need 1 parent to generate the
3468
:return: A list of (key, index_memo) records, suitable for
3469
passing to read_records_iter to start reading in the raw data from
3190
:return: A list of (revision_id, index_memo) records, suitable for
3191
passing to read_records_iter to start reading in the raw data fro/
3472
pending = set([key])
3194
if revision_id in self._annotated_lines:
3197
pending = set([revision_id])
3475
self._num_needed_children[key] = 1
3477
3202
# get all pending nodes
3478
3204
this_iteration = pending
3479
build_details = self._vf._index.get_build_details(this_iteration)
3205
build_details = self._knit._index.get_build_details(this_iteration)
3480
3206
self._all_build_details.update(build_details)
3481
# new_nodes = self._vf._index._get_entries(this_iteration)
3207
# new_nodes = self._knit._index._get_entries(this_iteration)
3482
3208
pending = set()
3483
for key, details in build_details.iteritems():
3484
(index_memo, compression_parent, parent_keys,
3209
for rev_id, details in build_details.iteritems():
3210
(index_memo, compression_parent, parents,
3485
3211
record_details) = details
3486
self._parent_map[key] = parent_keys
3487
self._heads_provider = None
3488
records.append((key, index_memo))
3212
self._revision_id_graph[rev_id] = parents
3213
records.append((rev_id, index_memo))
3489
3214
# Do we actually need to check _annotated_lines?
3490
pending.update([p for p in parent_keys
3491
if p not in self._all_build_details])
3493
for parent_key in parent_keys:
3494
if parent_key in self._num_needed_children:
3495
self._num_needed_children[parent_key] += 1
3497
self._num_needed_children[parent_key] = 1
3215
pending.update(p for p in parents
3216
if p not in self._all_build_details)
3498
3217
if compression_parent:
3499
if compression_parent in self._num_compression_children:
3500
self._num_compression_children[compression_parent] += 1
3502
self._num_compression_children[compression_parent] = 1
3218
self._compression_children.setdefault(compression_parent,
3221
for parent in parents:
3222
self._annotate_children.setdefault(parent,
3224
num_gens = generation - kept_generation
3225
if ((num_gens >= self._generations_until_keep)
3226
and len(parents) > 1):
3227
kept_generation = generation
3228
self._nodes_to_keep_annotations.add(rev_id)
3504
3230
missing_versions = this_iteration.difference(build_details.keys())
3505
if missing_versions:
3506
for key in missing_versions:
3507
if key in self._parent_map and key in self._text_cache:
3508
# We already have this text ready, we just need to
3509
# yield it later so we get it annotated
3511
parent_keys = self._parent_map[key]
3512
for parent_key in parent_keys:
3513
if parent_key in self._num_needed_children:
3514
self._num_needed_children[parent_key] += 1
3516
self._num_needed_children[parent_key] = 1
3517
pending.update([p for p in parent_keys
3518
if p not in self._all_build_details])
3520
raise errors.RevisionNotPresent(key, self._vf)
3231
self._ghosts.update(missing_versions)
3232
for missing_version in missing_versions:
3233
# add a key, no parents
3234
self._revision_id_graph[missing_version] = ()
3235
pending.discard(missing_version) # don't look for it
3236
# XXX: This should probably be a real exception, as it is a data
3238
assert not self._ghosts.intersection(self._compression_children), \
3239
"We cannot have nodes which have a compression parent of a ghost."
3240
# Cleanout anything that depends on a ghost so that we don't wait for
3241
# the ghost to show up
3242
for node in self._ghosts:
3243
if node in self._annotate_children:
3244
# We won't be building this node
3245
del self._annotate_children[node]
3521
3246
# Generally we will want to read the records in reverse order, because
3522
3247
# we find the parent nodes after the children
3523
3248
records.reverse()
3524
return records, ann_keys
3526

    def _get_needed_texts(self, key, pb=None):
        # if True or len(self._vf._fallback_vfs) > 0:
        if len(self._vf._fallback_vfs) > 0:
            # If we have fallbacks, go to the generic path
            for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
                yield v
            return
        while True:
            try:
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                        self._extract_texts(records)):
                    if pb is not None:
                        pb.update('annotating', idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text) # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks, e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()

    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
        self._matching_blocks[(key, compression_parent)] = blocks

    def _expand_record(self, key, parent_keys, compression_parent, record,
                       record_details):
        delta = None
        if compression_parent:
            if compression_parent not in self._content_objects:
                # Waiting for the parent
                self._pending_deltas.setdefault(compression_parent, []).append(
                    (key, parent_keys, record, record_details))
                return None
            # We have the basis parent, so expand the delta
            num = self._num_compression_children[compression_parent]
            num -= 1
            if num == 0:
                base_content = self._content_objects.pop(compression_parent)
                self._num_compression_children.pop(compression_parent)
            else:
                self._num_compression_children[compression_parent] = num
                base_content = self._content_objects[compression_parent]
            # It is tempting to want to copy_base_content=False for the last
            # child object. However, whenever noeol=False,
            # self._text_cache[parent_key] is content._lines. So mutating it
            # gives very bad results.
            # The alternative is to copy the lines into text cache, but then we
            # are copying anyway, so just do it here.
            content, delta = self._vf._factory.parse_record(
                key, record, record_details, base_content,
                copy_base_content=True)
        else:
            content, _ = self._vf._factory.parse_record(
                key, record, record_details, None)
        if self._num_compression_children.get(key, 0) > 0:
            self._content_objects[key] = content
        lines = content.text()
        self._text_cache[key] = lines
        if delta is not None:
            self._cache_delta_blocks(key, compression_parent, delta, lines)
        return lines

    def _get_parent_annotations_and_matches(self, key, text, parent_key):
        """Get the list of annotations for the parent, and the matching lines.

        :param text: The opaque value given by _get_needed_texts
        :param parent_key: The key for the parent text
        :return: (parent_annotations, matching_blocks)
            parent_annotations is a list as long as the number of lines in
                parent
            matching_blocks is a list of (parent_idx, text_idx, len) tuples
                indicating which lines match between the two texts
        """
        block_key = (key, parent_key)
        if block_key in self._matching_blocks:
            blocks = self._matching_blocks.pop(block_key)
            parent_annotations = self._annotations_cache[parent_key]
            return parent_annotations, blocks
        return annotate.Annotator._get_parent_annotations_and_matches(self,
            key, text, parent_key)

    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotation ready
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return

    def _check_ready_for_annotations(self, key, parent_keys):
        """Return True if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them.
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True
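
    # Illustrative sketch (not part of bzrlib): the two methods above form a
    # simple dependency gate. With hypothetical keys, suppose ('rev-3',) has
    # parents ('rev-1',) and ('rev-2',) and only rev-1 is annotated so far:
    #
    #   _check_ready_for_annotations(('rev-3',), [('rev-1',), ('rev-2',)])
    #   # -> False, and ('rev-3',) is queued in self._pending_annotation
    #   #    under the missing parent key ('rev-2',)
    #
    # When ('rev-2',) is eventually processed, _process_pending(('rev-2',))
    # pops that queue entry, re-checks readiness and returns [('rev-3',)] so
    # the caller can yield it.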
def _extract_texts(self, records):
3654
"""Extract the various texts needed based on records"""
3251
def _annotate_records(self, records):
3252
"""Build the annotations for the listed records."""
3655
3253
# We iterate in the order read, rather than a strict order requested
3656
3254
# However, process what we can, and put off to the side things that
3657
3255
# still need parents, cleaning them up when those parents are
3660
# 1) As 'records' are read, see if we can expand these records into
3661
# Content objects (and thus lines)
3662
# 2) If a given line-delta is waiting on its compression parent, it
3663
# gets queued up into self._pending_deltas, otherwise we expand
3664
# it, and put it into self._text_cache and self._content_objects
3665
# 3) If we expanded the text, we will then check to see if all
3666
# parents have also been processed. If so, this text gets yielded,
3667
# else this record gets set aside into pending_annotation
3668
# 4) Further, if we expanded the text in (2), we will then check to
3669
# see if there are any children in self._pending_deltas waiting to
3670
# also be processed. If so, we go back to (2) for those
3671
# 5) Further again, if we yielded the text, we can then check if that
3672
# 'unlocks' any of the texts in pending_annotations, which should
3673
# then get yielded as well
3674
# Note that both steps 4 and 5 are 'recursive' in that unlocking one
3675
# compression child could unlock yet another, and yielding a fulltext
3676
# will also 'unlock' the children that are waiting on that annotation.
3677
# (Though also, unlocking 1 parent's fulltext, does not unlock a child
3678
# if other parents are also waiting.)
3679
# We want to yield content before expanding child content objects, so
3680
# that we know when we can re-use the content lines, and the annotation
3681
# code can know when it can stop caching fulltexts, as well.
3683
# Children that are missing their compression parent
3685
for (key, record, digest) in self._vf._read_records_iter(records):
3687
details = self._all_build_details[key]
3688
(_, compression_parent, parent_keys, record_details) = details
3689
lines = self._expand_record(key, parent_keys, compression_parent,
3690
record, record_details)
3692
# Pending delta should be queued up
3257
for (rev_id, record,
3258
digest) in self._knit._data.read_records_iter(records):
3259
if rev_id in self._annotated_lines:
3694
# At this point, we may be able to yield this content, if all
3695
# parents are also finished
3696
yield_this_text = self._check_ready_for_annotations(key,
3699
# All parents present
3700
yield key, lines, len(lines)
3701
to_process = self._process_pending(key)
3703
this_process = to_process
3705
for key in this_process:
3706
lines = self._text_cache[key]
3707
yield key, lines, len(lines)
3708
to_process.extend(self._process_pending(key))
3261
parent_ids = self._revision_id_graph[rev_id]
3262
parent_ids = [p for p in parent_ids if p not in self._ghosts]
3263
details = self._all_build_details[rev_id]
3264
(index_memo, compression_parent, parents,
3265
record_details) = details
3266
nodes_to_annotate = []
3267
# TODO: Remove the punning between compression parents, and
3268
# parent_ids, we should be able to do this without assuming
3270
if len(parent_ids) == 0:
3271
# There are no parents for this node, so just add it
3272
# TODO: This probably needs to be decoupled
3273
assert compression_parent is None
3274
fulltext_content, delta = self._knit.factory.parse_record(
3275
rev_id, record, record_details, None)
3276
fulltext = self._add_fulltext_content(rev_id, fulltext_content)
3277
nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
3278
parent_ids, left_matching_blocks=None))
3280
child = (rev_id, parent_ids, record)
3281
# Check if all the parents are present
3282
self._check_parents(child, nodes_to_annotate)
3283
while nodes_to_annotate:
3284
# Should we use a queue here instead of a stack?
3285
(rev_id, parent_ids, record) = nodes_to_annotate.pop()
3286
(index_memo, compression_parent, parents,
3287
record_details) = self._all_build_details[rev_id]
3288
if compression_parent is not None:
3289
comp_children = self._compression_children[compression_parent]
3290
assert rev_id in comp_children
3291
# If there is only 1 child, it is safe to reuse this
3293
reuse_content = (len(comp_children) == 1
3294
and compression_parent not in
3295
self._nodes_to_keep_annotations)
3297
# Remove it from the cache since it will be changing
3298
parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
3299
# Make sure to copy the fulltext since it might be
3301
parent_fulltext = list(parent_fulltext_content.text())
3303
parent_fulltext_content = self._fulltext_contents[compression_parent]
3304
parent_fulltext = parent_fulltext_content.text()
3305
comp_children.remove(rev_id)
3306
fulltext_content, delta = self._knit.factory.parse_record(
3307
rev_id, record, record_details,
3308
parent_fulltext_content,
3309
copy_base_content=(not reuse_content))
3310
fulltext = self._add_fulltext_content(rev_id,
3312
blocks = KnitContent.get_line_delta_blocks(delta,
3313
parent_fulltext, fulltext)
3315
fulltext_content = self._knit.factory.parse_fulltext(
3317
fulltext = self._add_fulltext_content(rev_id,
3320
nodes_to_annotate.extend(
3321
self._add_annotation(rev_id, fulltext, parent_ids,
3322
left_matching_blocks=blocks))
3324
def _get_heads_provider(self):
3325
"""Create a heads provider for resolving ancestry issues."""
3326
if self._heads_provider is not None:
3327
return self._heads_provider
3328
parent_provider = _mod_graph.DictParentsProvider(
3329
self._revision_id_graph)
3330
graph_obj = _mod_graph.Graph(parent_provider)
3331
head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
3332
self._heads_provider = head_cache
3335
def annotate(self, revision_id):
3336
"""Return the annotated fulltext at the given revision.
3338
:param revision_id: The revision id for this file
3340
records = self._get_build_graph(revision_id)
3341
if revision_id in self._ghosts:
3342
raise errors.RevisionNotPresent(revision_id, self._knit)
3343
self._annotate_records(records)
3344
return self._annotated_lines[revision_id]

try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError, e:
    osutils.failed_to_load_extension(e)
    from bzrlib._knit_load_data_py import _load_data_py as _load_data