/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/tests/__init__.py

Merge first-try into propagate-exceptions

@@ -34,6 +34,7 @@
 import difflib
 import doctest
 import errno
+import itertools
 import logging
 import math
 import os
@@ -135,7 +136,7 @@
 SUBUNIT_SEEK_CUR = 1


-class ExtendedTestResult(unittest._TextTestResult):
+class ExtendedTestResult(testtools.TextTestResult):
     """Accepts, reports and accumulates the results of running tests.

     Compared to the unittest version this class adds support for
@@ -162,7 +163,7 @@
         :param bench_history: Optionally, a writable file object to accumulate
             benchmark results.
         """
-        unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
+        testtools.TextTestResult.__init__(self, stream)
         if bench_history is not None:
             from bzrlib.version import _get_bzr_source_tree
             src_tree = _get_bzr_source_tree()
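Switching the base class from unittest._TextTestResult to testtools.TextTestResult also changes the constructor the subclass has to call: the testtools result is initialised with only the output stream, so display settings such as descriptions and verbosity must be kept by the subclass itself (as a later hunk around line 616 does). A minimal sketch of that pattern, with a hypothetical subclass name:

import sys
import testtools

class VerboseResult(testtools.TextTestResult):
    # Hypothetical subclass illustrating the pattern in this diff: the
    # testtools base class takes just the stream, and the extra display
    # settings are stored on the subclass.
    def __init__(self, stream, descriptions=0, verbosity=1):
        testtools.TextTestResult.__init__(self, stream)
        self.descriptions = descriptions
        self.verbosity = verbosity

result = VerboseResult(sys.stdout, verbosity=2)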
@@ -195,11 +196,13 @@
         actionTaken = "Ran"
         stopTime = time.time()
         timeTaken = stopTime - self.startTime
-        self.printErrors()
-        self.stream.writeln(self.separator2)
-        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
+        # GZ 2010-07-19: Seems testtools has no printErrors method, and though
+        #                the parent class method is similar have to duplicate
+        self._show_list('ERROR', self.errors)
+        self._show_list('FAIL', self.failures)
+        self.stream.write(self.sep2)
+        self.stream.write("%s %d test%s in %.3fs\n\n" % (actionTaken,
                             run, run != 1 and "s" or "", timeTaken))
-        self.stream.writeln()
         if not self.wasSuccessful():
             self.stream.write("FAILED (")
             failed, errored = map(len, (self.failures, self.errors))
@@ -212,20 +215,20 @@
                 if failed or errored: self.stream.write(", ")
                 self.stream.write("known_failure_count=%d" %
                     self.known_failure_count)
-            self.stream.writeln(")")
+            self.stream.write(")\n")
         else:
             if self.known_failure_count:
-                self.stream.writeln("OK (known_failures=%d)" %
+                self.stream.write("OK (known_failures=%d)\n" %
                     self.known_failure_count)
             else:
-                self.stream.writeln("OK")
+                self.stream.write("OK\n")
         if self.skip_count > 0:
             skipped = self.skip_count
-            self.stream.writeln('%d test%s skipped' %
+            self.stream.write('%d test%s skipped\n' %
                                 (skipped, skipped != 1 and "s" or ""))
         if self.unsupported:
             for feature, count in sorted(self.unsupported.items()):
-                self.stream.writeln("Missing feature '%s' skipped %d tests." %
+                self.stream.write("Missing feature '%s' skipped %d tests.\n" %
                     (feature, count))
         if self._strict:
             ok = self.wasStrictlySuccessful()
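The `run != 1 and "s" or ""` expressions kept by this hunk are the pre-Python-2.5 spelling of a conditional expression, used here to pluralise "test"; the modern equivalent is a ternary:

run = 3
suffix = "s" if run != 1 else ""          # same as: run != 1 and "s" or ""
print("Ran %d test%s" % (run, suffix))    # -> Ran 3 tests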
@@ -269,11 +272,11 @@

     def _shortened_test_description(self, test):
         what = test.id()
-        what = re.sub(r'^bzrlib\.(tests|benchmarks)\.', '', what)
+        what = re.sub(r'^bzrlib\.tests\.', '', what)
         return what

     def startTest(self, test):
-        unittest.TestResult.startTest(self, test)
+        super(ExtendedTestResult, self).startTest(test)
         if self.count == 0:
             self.startTests()
         self.report_test_start(test)
@@ -317,7 +320,7 @@
         fails with an unexpected error.
         """
         self._post_mortem()
-        unittest.TestResult.addError(self, test, err)
+        super(ExtendedTestResult, self).addError(test, err)
         self.error_count += 1
         self.report_error(test, err)
         if self.stop_early:
@@ -331,7 +334,7 @@
         fails because e.g. an assert() method failed.
         """
         self._post_mortem()
-        unittest.TestResult.addFailure(self, test, err)
+        super(ExtendedTestResult, self).addFailure(test, err)
         self.failure_count += 1
         self.report_failure(test, err)
         if self.stop_early:
@@ -351,7 +354,7 @@
                     test.id()))
         self.report_success(test)
         self._cleanupLogFile(test)
-        unittest.TestResult.addSuccess(self, test)
+        super(ExtendedTestResult, self).addSuccess(test)
         test._log_contents = ''

     def addExpectedFailure(self, test, err):
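The startTest/addError/addFailure/addSuccess hunks all replace hard-coded unittest.TestResult calls with super(), so delegation follows the class's actual method resolution order and keeps working when the base class changes to testtools.TextTestResult. A small illustrative sketch with a hypothetical subclass:

import unittest

class CountingResult(unittest.TestResult):
    # Hypothetical example of the delegation pattern used in this diff:
    # calling super() instead of naming unittest.TestResult directly means
    # the call still reaches the right parent if the base class is swapped.
    def __init__(self):
        super(CountingResult, self).__init__()
        self.error_count = 0

    def addError(self, test, err):
        super(CountingResult, self).addError(test, err)
        self.error_count += 1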
@@ -545,40 +548,40 @@
         return '%s%s' % (indent, err[1])

     def report_error(self, test, err):
-        self.stream.writeln('ERROR %s\n%s'
+        self.stream.write('ERROR %s\n%s\n'
                 % (self._testTimeString(test),
                    self._error_summary(err)))

     def report_failure(self, test, err):
-        self.stream.writeln(' FAIL %s\n%s'
+        self.stream.write(' FAIL %s\n%s\n'
                 % (self._testTimeString(test),
                    self._error_summary(err)))

     def report_known_failure(self, test, err):
-        self.stream.writeln('XFAIL %s\n%s'
+        self.stream.write('XFAIL %s\n%s\n'
                 % (self._testTimeString(test),
                    self._error_summary(err)))

     def report_success(self, test):
-        self.stream.writeln('   OK %s' % self._testTimeString(test))
+        self.stream.write('   OK %s\n' % self._testTimeString(test))
         for bench_called, stats in getattr(test, '_benchcalls', []):
-            self.stream.writeln('LSProf output for %s(%s, %s)' % bench_called)
+            self.stream.write('LSProf output for %s(%s, %s)\n' % bench_called)
             stats.pprint(file=self.stream)
         # flush the stream so that we get smooth output. This verbose mode is
         # used to show the output in PQM.
         self.stream.flush()

     def report_skip(self, test, reason):
-        self.stream.writeln(' SKIP %s\n%s'
+        self.stream.write(' SKIP %s\n%s\n'
                 % (self._testTimeString(test), reason))

     def report_not_applicable(self, test, reason):
-        self.stream.writeln('  N/A %s\n    %s'
+        self.stream.write('  N/A %s\n    %s\n'
                 % (self._testTimeString(test), reason))

     def report_unsupported(self, test, feature):
         """test cannot be run because feature is missing."""
-        self.stream.writeln("NODEP %s\n    The feature '%s' is not available."
+        self.stream.write("NODEP %s\n    The feature '%s' is not available.\n"
                 %(self._testTimeString(test), feature))


@@ -613,7 +616,7 @@
             encode = codec.encode
         stream = osutils.UnicodeOrBytesToBytesWriter(encode, stream)
         stream.encoding = new_encoding
-        self.stream = unittest._WritelnDecorator(stream)
+        self.stream = stream
         self.descriptions = descriptions
         self.verbosity = verbosity
         self._bench_history = bench_history
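The hunk above drops unittest's private _WritelnDecorator, which is why every self.stream.writeln(...) elsewhere in this diff becomes self.stream.write(... + "\n"): the undecorated stream has no writeln method. Roughly, such a decorator only adds writeln on top of an ordinary stream, along these lines (a sketch, not the stdlib code):

class WritelnWrapper(object):
    """Sketch of a writeln-adding stream wrapper (the stdlib class is private)."""

    def __init__(self, stream):
        self.stream = stream

    def __getattr__(self, attr):
        # Delegate write(), flush(), etc. to the wrapped stream.
        return getattr(self.stream, attr)

    def writeln(self, arg=None):
        if arg:
            self.write(arg)
        self.write('\n')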
@@ -2006,7 +2009,7 @@

     def get_bzr_path(self):
         """Return the path of the 'bzr' executable for this test suite."""
-        bzr_path = self.get_source_path()+'/bzr'
+        bzr_path = os.path.join(self.get_source_path(), "bzr")
         if not os.path.isfile(bzr_path):
             # We are probably installed. Assume sys.argv is the right file
             bzr_path = sys.argv[0]
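Here os.path.join replaces hand-rolled '/' concatenation so the path is assembled with the platform's separator. A quick illustration (the path value is made up):

import os.path

source_path = "/src/bzr.dev"
print(source_path + '/bzr')               # '/src/bzr.dev/bzr', separator hard-coded
print(os.path.join(source_path, "bzr"))   # same result on POSIX, but portable to Windows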
@@ -2742,8 +2745,7 @@
     :param pattern: A regular expression string.
     :return: A callable that returns True if the re matches.
     """
-    filter_re = osutils.re_compile_checked(pattern, 0,
-        'test filter')
+    filter_re = re.compile(pattern, 0)
     def condition(test):
         test_id = test.id()
         return filter_re.search(test_id)
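The replacement compiles the user-supplied pattern directly with re.compile(pattern, 0), which raises a bare re.error on an invalid expression rather than the contextual message osutils.re_compile_checked presumably produced. If a friendlier message is still wanted, a small wrapper along these lines would do (a hypothetical helper, not part of this diff):

import re

def compile_user_pattern(pattern, flags=0, context='test filter'):
    # Compile a user-supplied regex, reporting the pattern and where it
    # came from if it does not parse.
    try:
        return re.compile(pattern, flags)
    except re.error as e:
        raise ValueError("invalid regular expression %r in %s: %s"
                         % (pattern, context, e))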
@@ -3195,15 +3197,16 @@

 def partition_tests(suite, count):
     """Partition suite into count lists of tests."""
-    result = []
-    tests = list(iter_suite_tests(suite))
-    tests_per_process = int(math.ceil(float(len(tests)) / count))
-    for block in range(count):
-        low_test = block * tests_per_process
-        high_test = low_test + tests_per_process
-        process_tests = tests[low_test:high_test]
-        result.append(process_tests)
-    return result
+    # This just assigns tests in a round-robin fashion.  On one hand this
+    # splits up blocks of related tests that might run faster if they shared
+    # resources, but on the other it avoids assigning blocks of slow tests to
+    # just one partition.  So the slowest partition shouldn't be much slower
+    # than the fastest.
+    partitions = [list() for i in range(count)]
+    tests = iter_suite_tests(suite)
+    for partition, test in itertools.izip(itertools.cycle(partitions), tests):
+        partition.append(test)
+    return partitions


 def workaround_zealous_crypto_random():
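The new partition_tests deals tests out round-robin instead of slicing the list into contiguous blocks, so a run of slow, related tests is spread across all partitions rather than landing in a single one; note that itertools.izip is Python 2 only. A standalone sketch of the same idea that runs on Python 2 and 3, using indices instead of cycle/izip:

def round_robin_partition(items, count):
    # Item i goes to partition i % count, like dealing cards, so
    # contiguous runs of slow items are spread across all partitions.
    partitions = [[] for _ in range(count)]
    for i, item in enumerate(items):
        partitions[i % count].append(item)
    return partitions

print(round_robin_partition(list(range(7)), 3))
# -> [[0, 3, 6], [1, 4], [2, 5]]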
@@ -3644,6 +3647,7 @@
         'bzrlib.doc',
         'bzrlib.tests.blackbox',
         'bzrlib.tests.commands',
+        'bzrlib.tests.doc_generate',
         'bzrlib.tests.per_branch',
         'bzrlib.tests.per_bzrdir',
         'bzrlib.tests.per_bzrdir_colo',
@@ -3815,6 +3819,7 @@
         'bzrlib.tests.test_transport_log',
         'bzrlib.tests.test_tree',
         'bzrlib.tests.test_treebuilder',
+        'bzrlib.tests.test_treeshape',
         'bzrlib.tests.test_tsort',
         'bzrlib.tests.test_tuned_gzip',
         'bzrlib.tests.test_ui',
@@ -3824,6 +3829,7 @@
         'bzrlib.tests.test_urlutils',
         'bzrlib.tests.test_version',
         'bzrlib.tests.test_version_info',
+        'bzrlib.tests.test_versionedfile',
         'bzrlib.tests.test_weave',
         'bzrlib.tests.test_whitebox',
         'bzrlib.tests.test_win32utils',