/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

« back to all changes in this revision

Viewing changes to bzrlib/tests/test_groupcompress.py

  • Committer: John Arbash Meinel
  • Date: 2009-09-03 15:23:46 UTC
  • mto: This revision was merged to the branch mainline in revision 4672.
  • Revision ID: john@arbash-meinel.com-20090903152346-wysd9b9xork5qxs5
Get a test written which exercises the 'trim' code path.

However, getting it exercised exposed that it isn't a code path we will ever
hit 'in the wild'.

Specifically, the new 'rebuild on the fly' code path says that any group less
than 75% utilized is scheduled for being rebuilt. But the 'trim' code path
only activates when we are using <50% of the block, and the last byte
is <2*percent_used. (So if we are using 30% of the block, the last byte
must be <60%; in that case we will trim rather than rebuild.)

Anyway, any condition under which we would trim is actually being turned into
a rebuild. So it is probably best to remove the test and the code path
during insert_record_stream.

Show diffs side-by-side

added added

removed removed

Lines of Context:
697
697
        vf.insert_record_stream(grouped_stream('abcdefghijkl'))
698
698
        vf.writer.end()
699
699
        block = manager = None
 
700
        raw_block_bytes = None
 
701
        raw_block_z_bytes = None
700
702
        record_order = []
701
703
        # Everything should fit in a single block
702
704
        for record in vf.get_record_stream([(r,) for r in 'abcdefghijkl'],
705
707
            if block is None:
706
708
                block = record._manager._block
707
709
                manager = record._manager
 
710
                raw_block_z_bytes = block._z_content
 
711
                block._ensure_content(block._content_length)
 
712
                raw_block_bytes = block._content
708
713
            else:
709
714
                self.assertIs(block, record._manager._block)
710
715
                self.assertIs(manager, record._manager)
711
716
        # 'unordered' fetching will put that in the same order it was inserted
712
717
        self.assertEqual([(r,) for r in 'abcdefghijkl'], record_order)
 
718
        # If we fetch enough of the block, but not everything, then it
 
719
        # should simply decompress, truncate, and recompress
 
720
        vf2 = self.make_test_vf(True, dir='target')
 
721
        def small_stream():
 
722
            for record in vf.get_record_stream([(r,) for r in 'acf'],
 
723
                                               'unordered', False):
 
724
                record._manager._full_enough_block_size = 50
 
725
                record._manager._max_cut_fraction = 0.3
 
726
                yield record
 
727
        vf2.insert_record_stream(small_stream())
 
728
            
 
729
        vf2.writer.end()
 
730
        record = vf2.get_record_stream([('a',)], 'unordered', False).next()
 
731
        new_block = record._manager._block
 
732
        self.assertIsNot(None, new_block._z_content)
 
733
        self.assertNotEqual(raw_block_z_bytes, new_block._z_content)
 
734
        new_block._ensure_content(new_block._content_length)
 
735
        # The new content is simply the truncation of the old content
 
736
        self.assertStartsWith(raw_block_bytes, new_block._content)
 
737
        self.assertTrue(len(new_block._content) < len(raw_block_bytes))
713
738
 
714
739
    def test_add_missing_noncompression_parent_unvalidated_index(self):
715
740
        unvalidated = self.make_g_index_missing_parent()