    annotator = _KnitAnnotator(knit)
    return iter(annotator.annotate_flat(revision_id))


class _KnitAnnotator(annotate.Annotator):
    """Build up the annotations for a text."""

    def __init__(self, knit):
        self._knit = knit

        # Content objects, differs from fulltexts because of how final newlines
        # are treated by knits. The content objects here will always have a
        # final newline
        self._fulltext_contents = {}

        # Annotated lines of specific revisions
        self._annotated_lines = {}

        # Track the raw data for nodes that we could not process yet.
        # This maps the revision_id of the base to a list of children that will
        # be annotated from it.
        self._pending_children = {}

        # Nodes which cannot be extracted
        self._ghosts = set()

        # Track how many children this node has, so we know if we need to keep
        # it
        self._annotate_children = {}
        self._compression_children = {}

    def __init__(self, vf):
        annotate.Annotator.__init__(self, vf)
        # TODO: handle Nodes which cannot be extracted
        # self._ghosts = set()
        # Map from (key, parent_key) => matching_blocks, should be 'use once'
        self._matching_blocks = {}
        # KnitContent objects
        self._content_objects = {}
        # The number of children that depend on this fulltext content object
        self._num_compression_children = {}
        # Delta records that need their compression parent before they can be
        # expanded
        self._pending_deltas = {}
        # Fulltext records that are waiting for their parents' fulltexts before
        # they can be yielded for annotation
        self._pending_annotation = {}

        self._all_build_details = {}
        # The children => parent revision_id graph
        self._revision_id_graph = {}

        self._heads_provider = None

        self._nodes_to_keep_annotations = set()
        self._generations_until_keep = 100

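    # Overview (a rough sketch inferred from the code in this class, so treat
    # the details as approximate): the caches above feed a two-phase pipeline.
    # _get_build_graph() walks the index to decide which raw records are
    # needed, _extract_texts() / _annotate_records() expand deltas into full
    # texts, and annotate.reannotate() assigns an origin revision to every
    # line, keeping parent annotations cached only while children still need
    # them.
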
    def set_generations_until_keep(self, value):
        """Set the number of generations before caching a node.

        Setting this to -1 will cache every merge node, setting this higher
        will cache fewer nodes.
        """
        self._generations_until_keep = value

    def _add_fulltext_content(self, revision_id, content_obj):
        self._fulltext_contents[revision_id] = content_obj
        # TODO: jam 20080305 It might be good to check the sha1digest here
        return content_obj.text()

    def _check_parents(self, child, nodes_to_annotate):
        """Check if all parents have been processed.

        :param child: A tuple of (rev_id, parents, raw_content)
        :param nodes_to_annotate: If child is ready, add it to
            nodes_to_annotate, otherwise put it back in self._pending_children
        """
        for parent_id in child[1]:
            if (parent_id not in self._annotated_lines):
                # This parent has not been annotated yet, so this child is not
                # ready; park it until that parent is processed
                self._pending_children.setdefault(parent_id,
                                                  []).append(child)
                break
        else:
            # This one is ready to be processed
            nodes_to_annotate.append(child)

    def _add_annotation(self, revision_id, fulltext, parent_ids,
                        left_matching_blocks=None):
        """Add an annotation entry.

        All parents should already have been annotated.
        :return: A list of children that now have their parents satisfied.
        """
        a = self._annotated_lines
        annotated_parent_lines = [a[p] for p in parent_ids]
        annotated_lines = list(annotate.reannotate(annotated_parent_lines,
            fulltext, revision_id, left_matching_blocks,
            heads_provider=self._get_heads_provider()))
        self._annotated_lines[revision_id] = annotated_lines
        for p in parent_ids:
            ann_children = self._annotate_children[p]
            ann_children.remove(revision_id)
            if (not ann_children
                and p not in self._nodes_to_keep_annotations):
                del self._annotated_lines[p]
                del self._all_build_details[p]
                if p in self._fulltext_contents:
                    del self._fulltext_contents[p]
        # Now that we've added this one, see if there are any pending
        # deltas to be done, certainly this parent is finished
        nodes_to_annotate = []
        for child in self._pending_children.pop(revision_id, []):
            self._check_parents(child, nodes_to_annotate)
        return nodes_to_annotate

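    # A small illustration (hedged; the exact shapes come from annotate.py,
    # not this file): annotated lines are (origin_revision_id, line) pairs,
    # so after annotating a child of 'rev-1' that adds one line we might see
    #   self._annotated_lines['rev-2'] == [('rev-1', 'a\n'), ('rev-2', 'b\n')]
    # reannotate() merges the parents' pairs against the child's fulltext.
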
    def _get_build_graph(self, key):
        """Get the graphs for building texts and annotations.

        :return: A tuple (records, ann_keys). records is a list of
            (key, index_memo) pairs, suitable for
            passing to read_records_iter to start reading in the raw data from
            the pack file. ann_keys is a set of keys whose text is already
            cached and only needs to be yielded for annotation.
        """
        pending = set([key])
        records = []
        ann_keys = set()
        generation = 0
        kept_generation = 0
        self._num_needed_children[key] = 1
        while pending:
            # get all pending nodes
            generation += 1
            this_iteration = pending
            build_details = self._vf._index.get_build_details(this_iteration)
            self._all_build_details.update(build_details)
            # new_nodes = self._vf._index._get_entries(this_iteration)
            pending = set()
            for key, details in build_details.iteritems():
                (index_memo, compression_parent, parent_keys,
                 record_details) = details
                self._revision_id_graph[key] = parent_keys
                self._parent_map[key] = parent_keys
                self._heads_provider = None
                records.append((key, index_memo))
                # Do we actually need to check _annotated_lines?
                pending.update([p for p in parent_keys
                                if p not in self._all_build_details])
                for parent_key in parent_keys:
                    if parent_key in self._num_needed_children:
                        self._num_needed_children[parent_key] += 1
                    else:
                        self._num_needed_children[parent_key] = 1
if compression_parent:
3446
self._compression_children.setdefault(compression_parent,
3449
for parent in parents:
3450
self._annotate_children.setdefault(parent,
3452
num_gens = generation - kept_generation
3453
if ((num_gens >= self._generations_until_keep)
3454
and len(parents) > 1):
3455
kept_generation = generation
3456
self._nodes_to_keep_annotations.add(key)
3419
if compression_parent in self._num_compression_children:
3420
self._num_compression_children[compression_parent] += 1
3422
self._num_compression_children[compression_parent] = 1
3458

            missing_versions = this_iteration.difference(build_details.keys())
            self._ghosts.update(missing_versions)
            for missing_version in missing_versions:
                # add a key, no parents
                self._revision_id_graph[missing_version] = ()
                pending.discard(missing_version) # don't look for it
            if self._ghosts.intersection(self._compression_children):
                raise AssertionError(
                    "We cannot have nodes which have a ghost compression parent:\n"
                    "ghosts: %r\n"
                    "compression children: %r"
                    % (self._ghosts, self._compression_children))
            # Clean out anything that depends on a ghost so that we don't
            # wait for the ghost to show up
            for node in self._ghosts:
                if node in self._annotate_children:
                    # We won't be building this node
                    del self._annotate_children[node]
            if missing_versions:
                for key in missing_versions:
                    if key in self._parent_map and key in self._text_cache:
                        # We already have this text ready, we just need to
                        # yield it later so we get it annotated
                        ann_keys.add(key)
                        parent_keys = self._parent_map[key]
                        for parent_key in parent_keys:
                            if parent_key in self._num_needed_children:
                                self._num_needed_children[parent_key] += 1
                            else:
                                self._num_needed_children[parent_key] = 1
                        pending.update([p for p in parent_keys
                                        if p not in self._all_build_details])
                    else:
                        raise errors.RevisionNotPresent(key, self._vf)
        # Generally we will want to read the records in reverse order, because
        # we find the parent nodes after the children
        records.reverse()
        return records, ann_keys

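    # Rough reading of the return value (inferred from the code above, so
    # treat it as a sketch): `records` is a reverse-topological list of
    # (key, index_memo) pairs to feed to the record reader, while `ann_keys`
    # holds keys whose text already sits in self._text_cache and only needs
    # to be yielded again so it picks up an annotation.
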
    def _get_needed_texts(self, key, pb=None):
        # if True or len(self._vf._fallback_vfs) > 0:
        if len(self._vf._fallback_vfs) > 0:
            # If we have fallbacks, go to the generic path
            for v in annotate.Annotator._get_needed_texts(self, key, pb=pb):
                yield v
            return
        while True:
            try:
                records, ann_keys = self._get_build_graph(key)
                for idx, (sub_key, text, num_lines) in enumerate(
                        self._extract_texts(records)):
                    if pb is not None:
                        pb.update('annotating', idx, len(records))
                    yield sub_key, text, num_lines
                for sub_key in ann_keys:
                    text = self._text_cache[sub_key]
                    num_lines = len(text) # bad assumption
                    yield sub_key, text, num_lines
                return
            except errors.RetryWithNewPacks, e:
                self._vf._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()

    def _cache_delta_blocks(self, key, compression_parent, delta, lines):
        parent_lines = self._text_cache[compression_parent]
        blocks = list(KnitContent.get_line_delta_blocks(delta, parent_lines, lines))
        self._matching_blocks[(key, compression_parent)] = blocks

    def _expand_record(self, key, parent_keys, compression_parent, record,
                       record_details):
        delta = None
        if compression_parent:
            if compression_parent not in self._content_objects:
                # Waiting for the parent
                self._pending_deltas.setdefault(compression_parent, []).append(
                    (key, parent_keys, record, record_details))
                return None
            # We have the basis parent, so expand the delta
            num = self._num_compression_children[compression_parent]
            num -= 1
            if num == 0:
                base_content = self._content_objects.pop(compression_parent)
                self._num_compression_children.pop(compression_parent)
            else:
                self._num_compression_children[compression_parent] = num
                base_content = self._content_objects[compression_parent]
            # It is tempting to want to copy_base_content=False for the last
            # child object. However, whenever noeol=False,
            # self._text_cache[parent_key] is content._lines. So mutating it
            # gives very bad results.
            # The alternative is to copy the lines into text cache, but then we
            # are copying anyway, so just do it here.
            content, delta = self._vf._factory.parse_record(
                key, record, record_details, base_content,
                copy_base_content=True)
        else:
            # Fulltext record
            content, _ = self._vf._factory.parse_record(
                key, record, record_details, None)
        if self._num_compression_children.get(key, 0) > 0:
            self._content_objects[key] = content
        lines = content.text()
        self._text_cache[key] = lines
        if delta is not None:
            self._cache_delta_blocks(key, compression_parent, delta, lines)
        return lines

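    # Worked example (a sketch based on the bookkeeping above, not taken from
    # the original source): if 'B' is stored as a delta against 'A' and 'A'
    # has a single remaining compression child, expanding 'B' pops 'A' from
    # self._content_objects, uses its content as the basis, and caches the
    # (A, B) matching blocks so the annotation step can avoid re-diffing.
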
    def _get_parent_annotations_and_matches(self, key, text, parent_key):
        """Get the list of annotations for the parent, and the matching lines.

        :param text: The opaque value given by _get_needed_texts
        :param parent_key: The key for the parent text
        :return: (parent_annotations, matching_blocks)
            parent_annotations is a list as long as the number of lines in
                parent
            matching_blocks is a list of (parent_idx, text_idx, len) tuples
                indicating which lines match between the two texts
        """
        block_key = (key, parent_key)
        if block_key in self._matching_blocks:
            blocks = self._matching_blocks.pop(block_key)
            parent_annotations = self._annotations_cache[parent_key]
            return parent_annotations, blocks
        return annotate.Annotator._get_parent_annotations_and_matches(self,
            key, text, parent_key)

    def _process_pending(self, key):
        """The content for 'key' was just processed.

        Determine if there is any more pending work to be processed.
        """
        to_return = []
        if key in self._pending_deltas:
            compression_parent = key
            children = self._pending_deltas.pop(key)
            for child_key, parent_keys, record, record_details in children:
                lines = self._expand_record(child_key, parent_keys,
                                            compression_parent,
                                            record, record_details)
                if self._check_ready_for_annotations(child_key, parent_keys):
                    to_return.append(child_key)
        # Also check any children that are waiting for this parent to be
        # annotated
        if key in self._pending_annotation:
            children = self._pending_annotation.pop(key)
            to_return.extend([c for c, p_keys in children
                              if self._check_ready_for_annotations(c, p_keys)])
        return to_return

    def _check_ready_for_annotations(self, key, parent_keys):
        """Return True if this text is ready to be yielded.

        Otherwise, this will return False, and queue the text into
        self._pending_annotation
        """
        for parent_key in parent_keys:
            if parent_key not in self._annotations_cache:
                # still waiting on at least one parent text, so queue it up
                # Note that if there are multiple parents, we need to wait
                # for all of them
                self._pending_annotation.setdefault(parent_key,
                    []).append((key, parent_keys))
                return False
        return True

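    # For example (sketch): if key X has parents A and B and only A has been
    # annotated so far, X is queued under B in self._pending_annotation; once
    # B's annotation lands, _process_pending(B) re-checks X and lets it
    # through.
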
    def _extract_texts(self, records):
        """Extract the various texts needed based on records"""
        # We iterate in the order read, rather than a strict order requested
        # However, process what we can, and put off to the side things that
        # still need parents, cleaning them up when those parents are
        # processed.
        # Basic algorithm:
        # 1) As 'records' are read, see if we can expand these records into
        #    Content objects (and thus lines)
        # 2) If a given line-delta is waiting on its compression parent, it
        #    gets queued up into self._pending_deltas, otherwise we expand
        #    it, and put it into self._text_cache and self._content_objects
        # 3) If we expanded the text, we will then check to see if all
        #    parents have also been processed. If so, this text gets yielded,
        #    else this record gets set aside into pending_annotation
        # 4) Further, if we expanded the text in (2), we will then check to
        #    see if there are any children in self._pending_deltas waiting to
        #    also be processed. If so, we go back to (2) for those
        # 5) Further again, if we yielded the text, we can then check if that
        #    'unlocks' any of the texts in pending_annotations, which should
        #    then get yielded as well
        # Note that both steps 4 and 5 are 'recursive' in that unlocking one
        # compression child could unlock yet another, and yielding a fulltext
        # will also 'unlock' the children that are waiting on that annotation.
        # (Though also, unlocking 1 parent's fulltext, does not unlock a child
        # if other parents are also waiting.)
        # We want to yield content before expanding child content objects, so
        # that we know when we can re-use the content lines, and the annotation
        # code can know when it can stop caching fulltexts, as well.

        # Children that are missing their compression parent
        for (key, record, digest) in self._vf._read_records_iter(records):
            details = self._all_build_details[key]
            (_, compression_parent, parent_keys, record_details) = details
            lines = self._expand_record(key, parent_keys, compression_parent,
                                        record, record_details)
            if lines is None:
                # Pending delta should be queued up
                continue
            # At this point, we may be able to yield this content, if all
            # parents are also finished
            yield_this_text = self._check_ready_for_annotations(key,
                                                                parent_keys)
            if yield_this_text:
                # All parents present
                yield key, lines, len(lines)
            to_process = self._process_pending(key)
            while to_process:
                this_process = to_process
                to_process = []
                for key in this_process:
                    lines = self._text_cache[key]
                    yield key, lines, len(lines)
                    to_process.extend(self._process_pending(key))

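    # Worked example for the steps above (a sketch, not from the original
    # source): suppose the records happen to arrive as C (delta on B), B
    # (delta on A), then A (fulltext), where A <- B <- C is also the
    # ancestry.  C and B are parked in _pending_deltas (step 2).  A expands
    # and is yielded (step 3), which unlocks B via _process_pending (step 4);
    # B expands and is yielded, which in turn unlocks C (step 5), so one
    # fulltext read fans out to the whole chain.
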
    def _annotate_records(self, records):
        """Build the annotations for the listed records."""
        for (rev_id, record,
             digest) in self._knit._read_records_iter(records):
            if rev_id in self._annotated_lines:
                continue
            parent_ids = self._revision_id_graph[rev_id]
            parent_ids = [p for p in parent_ids if p not in self._ghosts]
            details = self._all_build_details[rev_id]
            (index_memo, compression_parent, parents,
             record_details) = details
            nodes_to_annotate = []
            # TODO: Remove the punning between compression parents, and
            #       parent_ids, we should be able to do this without assuming
            if len(parent_ids) == 0:
                # There are no parents for this node, so just add it
                # TODO: This probably needs to be decoupled
                fulltext_content, delta = self._knit._factory.parse_record(
                    rev_id, record, record_details, None)
                fulltext = self._add_fulltext_content(rev_id, fulltext_content)
                nodes_to_annotate.extend(self._add_annotation(rev_id, fulltext,
                    parent_ids, left_matching_blocks=None))
            else:
                child = (rev_id, parent_ids, record)
                # Check if all the parents are present
                self._check_parents(child, nodes_to_annotate)
            while nodes_to_annotate:
                # Should we use a queue here instead of a stack?
                (rev_id, parent_ids, record) = nodes_to_annotate.pop()
                (index_memo, compression_parent, parents,
                 record_details) = self._all_build_details[rev_id]
                blocks = None
                if compression_parent is not None:
                    comp_children = self._compression_children[compression_parent]
                    if rev_id not in comp_children:
                        raise AssertionError("%r not in compression children %r"
                            % (rev_id, comp_children))
                    # If there is only 1 child, it is safe to reuse this
                    # content, otherwise we will need to copy it first
                    reuse_content = (len(comp_children) == 1
                        and compression_parent not in
                            self._nodes_to_keep_annotations)
                    if reuse_content:
                        # Remove it from the cache since it will be changing
                        parent_fulltext_content = self._fulltext_contents.pop(compression_parent)
                        # Make sure to copy the fulltext since it might be
                        # modified
                        parent_fulltext = list(parent_fulltext_content.text())
                    else:
                        parent_fulltext_content = self._fulltext_contents[compression_parent]
                        parent_fulltext = parent_fulltext_content.text()
                    comp_children.remove(rev_id)
                    fulltext_content, delta = self._knit._factory.parse_record(
                        rev_id, record, record_details,
                        parent_fulltext_content,
                        copy_base_content=(not reuse_content))
                    fulltext = self._add_fulltext_content(rev_id,
                                                          fulltext_content)
                    if compression_parent == parent_ids[0]:
                        # the compression_parent is the left parent, so we can
                        # re-use the delta
                        blocks = KnitContent.get_line_delta_blocks(delta,
                                parent_fulltext, fulltext)
                else:
                    fulltext_content = self._knit._factory.parse_fulltext(
                        record, rev_id)
                    fulltext = self._add_fulltext_content(rev_id,
                        fulltext_content)
                nodes_to_annotate.extend(
                    self._add_annotation(rev_id, fulltext, parent_ids,
                                         left_matching_blocks=blocks))

    def _get_heads_provider(self):
        """Create a heads provider for resolving ancestry issues."""
        if self._heads_provider is not None:
            return self._heads_provider
        parent_provider = _mod_graph.DictParentsProvider(
            self._revision_id_graph)
        graph_obj = _mod_graph.Graph(parent_provider)
        head_cache = _mod_graph.FrozenHeadsCache(graph_obj)
        self._heads_provider = head_cache
        return head_cache

    def annotate(self, key):
        """Return the annotated fulltext at the given key.

        :param key: The key to annotate.
        """
        if len(self._knit._fallback_vfs) > 0:
            # stacked knits can't use the fast path at present.
            return self._simple_annotate(key)
        while True:
            try:
                records = self._get_build_graph(key)
                if key in self._ghosts:
                    raise errors.RevisionNotPresent(key, self._knit)
                self._annotate_records(records)
                return self._annotated_lines[key]
            except errors.RetryWithNewPacks, e:
                self._knit._access.reload_or_raise(e)
                # The cached build_details are no longer valid
                self._all_build_details.clear()

    def _simple_annotate(self, key):
        """Return annotated fulltext, rediffing from the full texts.

        This is slow but makes no assumptions about the repository
        being able to produce line deltas.
        """
        # TODO: this code generates a parent map of present ancestors; it
        # could be split out into a separate method, and probably should use
        # iter_ancestry instead. -- mbp and robertc 20080704
        graph = _mod_graph.Graph(self._knit)
        head_cache = _mod_graph.FrozenHeadsCache(graph)
        search = graph._make_breadth_first_searcher([key])
        keys = set()
        while True:
            try:
                present, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(present)
        parent_map = self._knit.get_parent_map(keys)
        parent_cache = {}
        reannotate = annotate.reannotate
        for record in self._knit.get_record_stream(keys, 'topological', True):
            key = record.key
            fulltext = osutils.chunks_to_lines(record.get_bytes_as('chunked'))
            parents = parent_map[key]
            if parents is not None:
                parent_lines = [parent_cache[parent] for parent in parent_map[key]]
            else:
                parent_lines = []
            parent_cache[key] = list(
                reannotate(parent_lines, fulltext, key, None, head_cache))
        try:
            return parent_cache[key]
        except KeyError, e:
            raise errors.RevisionNotPresent(key, self._knit)


try:
    from bzrlib._knit_load_data_pyx import _load_data_c as _load_data
except ImportError:
    from bzrlib._knit_load_data_py import _load_data_py as _load_data
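

# Illustrative usage sketch (hypothetical helper, not part of bzrlib): it
# assumes `vf` is a KnitVersionedFiles containing `key`, and that
# annotate_flat() yields (origin_key, line) pairs as the Annotator base
# class documents.
def _example_annotate(vf, key):
    annotator = _KnitAnnotator(vf)
    for origin, line in annotator.annotate_flat(key):
        # each line is paired with the key of the revision that introduced it
        print '%s: %s' % (origin, line.rstrip('\n'))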