PATCH: Continue improving scalability

Mark Mitchell <mark at codesourcery.com>
Fri Aug 1 19:49:04 UTC 2003


This patch continues the push to improve the scalability of QMTest.
In particular, TextResultStream no longer stores all results -- just
the results with unexpected outcomes.
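The idea, in a minimal sketch (hypothetical simplified names, not the actual
QMTest classes; the real change is in TextResultStream in the diff below):
keep per-outcome counters and retain only the results whose outcome differs
from what was expected, so memory use grows with the number of unexpected
outcomes rather than with the total number of tests.

    # Sketch only -- names here are illustrative, not QMTest's API.
    PASS = "PASS"
    OUTCOMES = ("PASS", "FAIL", "ERROR", "UNTESTED")

    class StreamingSummary:
        def __init__(self, expected_outcomes):
            # 'expected_outcomes' maps test IDs to expected outcomes.
            self.expected_outcomes = expected_outcomes
            self.num_tests = 0
            self.outcome_counts = dict.fromkeys(OUTCOMES, 0)
            self.unexpected_results = []    # the only results retained

        def write_result(self, test_id, outcome, result):
            self.num_tests += 1
            self.outcome_counts[outcome] += 1
            if outcome != self.expected_outcomes.get(test_id, PASS):
                # Memory now scales with the number of unexpected
                # outcomes, not with the total number of tests run.
                self.unexpected_results.append(result)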

--
Mark Mitchell
CodeSourcery, LLC
mark at codesourcery.com

2003-08-01  Mark Mitchell  <mark at codesourcery.com>

	* qm/test/base.py (split_results_by_expected_outcome): Remove.
	* qm/test/cmdline.py (QMTest.__GetResultStreams): Remove
	test_suites parameter.
	* qm/test/result_stream.py (ResultStream.arguments): Remove
	suite_ids.
	* qm/test/classes/text_result_stream.py
	(TextResultStream.__init__): Do not save test and resource
	results.
	(TextResultStream.WriteResult): Update outcome counts, but do not
	save results.
	(TextResultStream.Summarize): Adjust accordingly.
	(TextResultStream._SummarizeTestStats): Remove results parameter.
	(TextResultStream._SummarizeRelativeTestStats): Likewise.
	(TextResultStream._CountOutcomes): Remove.
	(TextResultStream._SummarizeTestSuiteStats): Remove.

Index: qm/test/base.py
===================================================================
RCS file: /home/sc/Repository/qm/qm/test/base.py,v
retrieving revision 1.88
diff -c -5 -p -r1.88 base.py
*** qm/test/base.py	24 Jul 2003 06:38:39 -0000	1.88
--- qm/test/base.py	1 Aug 2003 19:45:16 -0000
*************** def _result_from_dom(node):
*** 392,424 ****
          result[name] = value
  
      return result
  
  
- def split_results_by_expected_outcome(results, expected_outcomes):
-     """Partition a sequence of results by expected outcomes.
- 
-     'results' -- A sequence of 'Result' objects.
- 
-     'expected_outcomes' -- A map from ID to corresponding expected
-     outcome.
- 
-     returns -- A pair of lists.  The first contains results that
-     produced the expected outcome.  The second contains results that
-     didn't."""
- 
-     expected = []
-     unexpected = []
-     for result in results:
-         expected_outcome = expected_outcomes.get(result.GetId(), Result.PASS)
-         if result.GetOutcome() == expected_outcome:
-             expected.append(result)
-         else:
-             unexpected.append(result)
-     return expected, unexpected
- 
- 
  ########################################################################
  # variables
  ########################################################################
  
  extension_kinds = [ 'database',
--- 392,401 ----
Index: qm/test/cmdline.py
===================================================================
RCS file: /home/sc/Repository/qm/qm/test/cmdline.py,v
retrieving revision 1.90
diff -c -5 -p -r1.90 cmdline.py
*** qm/test/cmdline.py	24 Jul 2003 18:14:17 -0000	1.90
--- qm/test/cmdline.py	1 Aug 2003 19:45:17 -0000
*************** Valid formats are "full", "brief" (the d
*** 1235,1245 ****
  
          any_unexpected_outcomes = 0
  
          # Compute the list of result streams to which output should be
          # written.
!         streams = self.__GetResultStreams(suite_ids)
          
          # Send the annotations through.
          for s in streams:
              s.WriteAllAnnotations(results.GetAnnotations())
  
--- 1235,1245 ----
  
          any_unexpected_outcomes = 0
  
          # Compute the list of result streams to which output should be
          # written.
!         streams = self.__GetResultStreams()
          
          # Send the annotations through.
          for s in streams:
              s.WriteAllAnnotations(results.GetAnnotations())
  
*************** Valid formats are "full", "brief" (the d
*** 1383,1393 ****
              rs = (self.GetFileResultStreamClass()
                    ({ "filename" : result_file_name}))
              result_streams.append(rs)
  
          # Handle the --result-stream options.
!         result_streams.extend(self.__GetResultStreams(test_suites))
          
          if self.HasCommandOption("random"):
              # Randomize the order of the tests.
              random.shuffle(test_ids)
          else:
--- 1383,1393 ----
              rs = (self.GetFileResultStreamClass()
                    ({ "filename" : result_file_name}))
              result_streams.append(rs)
  
          # Handle the --result-stream options.
!         result_streams.extend(self.__GetResultStreams())
          
          if self.HasCommandOption("random"):
              # Randomize the order of the tests.
              random.shuffle(test_ids)
          else:
*************** Valid formats are "full", "brief" (the d
*** 1581,1591 ****
              raise qm.cmdline.CommandError, \
                    qm.error("invalid extension kind",
                             kind = kind)
  
                         
!     def __GetResultStreams(self, test_suites):
          """Return the result streams to use.
  
          returns -- A list of 'ResultStream' objects, as indicated by the
          user."""
  
--- 1581,1591 ----
              raise qm.cmdline.CommandError, \
                    qm.error("invalid extension kind",
                             kind = kind)
  
                         
!     def __GetResultStreams(self):
          """Return the result streams to use.
  
          returns -- A list of 'ResultStream' objects, as indicated by the
          user."""
  
*************** Valid formats are "full", "brief" (the d
*** 1594,1604 ****
          result_streams = []
  
          arguments = {
              "expected_outcomes" : self.__GetExpectedOutcomes(),
              "database" : database,
-             "suite_ids" : test_suites
              }
          
          # Look up the summary format.
          format = self.GetCommandOption("format", "")
          if format and format not in self.summary_formats:
--- 1594,1603 ----
*************** Valid formats are "full", "brief" (the d
*** 1612,1622 ****
          if format != "none":
              as = { "format" : format }
              as.update(arguments)
              stream = self.GetTextResultStreamClass()(as)
              result_streams.append(stream)
!         
          f = lambda n: qm.test.base.get_extension_class(n,
                                                         "result_stream",
                                                         database)
          
          # Look for all of the "--result-stream" options.
--- 1611,1621 ----
          if format != "none":
              as = { "format" : format }
              as.update(arguments)
              stream = self.GetTextResultStreamClass()(as)
              result_streams.append(stream)
! 
          f = lambda n: qm.test.base.get_extension_class(n,
                                                         "result_stream",
                                                         database)
          
          # Look for all of the "--result-stream" options.
Index: qm/test/result_stream.py
===================================================================
RCS file: /home/sc/Repository/qm/qm/test/result_stream.py,v
retrieving revision 1.7
diff -c -5 -p -r1.7 result_stream.py
*** qm/test/result_stream.py	3 Jul 2003 19:32:04 -0000	1.7
--- qm/test/result_stream.py	1 Aug 2003 19:45:17 -0000
*************** class ResultStream(qm.extension.Extensio
*** 46,57 ****
      arguments = [
          qm.fields.PythonField(
             name = "expected_outcomes"),
          qm.fields.PythonField(
             name = "database"),
-         qm.fields.PythonField(
-            name = "suite_ids"),
          ]
      
      def WriteAnnotation(self, key, value):
          """Output an annotation for this run.
  
--- 46,55 ----
Index: qm/test/classes/text_result_stream.py
===================================================================
RCS file: /home/sc/Repository/qm/qm/test/classes/text_result_stream.py,v
retrieving revision 1.2
diff -c -5 -p -r1.2 text_result_stream.py
*** qm/test/classes/text_result_stream.py	31 Jul 2003 23:17:50 -0000	1.2
--- qm/test/classes/text_result_stream.py	1 Aug 2003 19:45:17 -0000
*************** class TextResultStream(FileResultStream)
*** 79,89 ****
              gives details about any tests with unexpected outcomes.
  
              The "full" format is like "brief" except that all
              annotations are shown for tests as they are run.
  
!             The "stats" format omits the failing tests section."""),
          ]
      
      def __init__(self, arguments):
          """Construct a 'TextResultStream'.
  
--- 79,90 ----
              gives details about any tests with unexpected outcomes.
  
              The "full" format is like "brief" except that all
              annotations are shown for tests as they are run.
  
!             In the "stats" format only the summary statistics are
!             displayed."""),
          ]
      
      def __init__(self, arguments):
          """Construct a 'TextResultStream'.
  
*************** class TextResultStream(FileResultStream)
*** 102,129 ****
                  if self.file.isatty():
                      self.format = "brief"
              except:
                  pass
              
-         self.__test_results = []
-         self.__resource_results = []
          self.__first_test = 1
!         
!         
      def WriteResult(self, result):
          """Output a test or resource result.
  
          'result' -- A 'Result'."""
  
          # Record the results as they are received.
          if result.GetKind() == Result.TEST:
!             self.__test_results.append(result)
          else:
!             self.__resource_results.append(result)
! 
!         # In batch mode, no results are displayed as tests are run.
!         if self.format == "batch":
              return
          
          # Display a heading before the first result.
          if self.__first_test:
              self._DisplayHeading("TEST RESULTS")
--- 103,154 ----
                  if self.file.isatty():
                      self.format = "brief"
              except:
                  pass
              
          self.__first_test = 1
!         # Keep track of tests and resources that had unexpected outcomes.
!         self.__unexpected_test_results = []
!         self.__unexpected_resource_results = []
!         # And how many tests were run.
!         self.__num_tests = 0
!         # And how many tests there are with each outcome.
!         self.__outcome_counts = {}
!         for o in Result.outcomes:
!             self.__outcome_counts[o] = 0
!         # And how many unexpected tests there are with each outcome.
!         self.__unexpected_outcome_counts = {}
!         for o in Result.outcomes:
!             self.__unexpected_outcome_counts[o] = 0
! 
! 
      def WriteResult(self, result):
          """Output a test or resource result.
  
          'result' -- A 'Result'."""
  
          # Record the results as they are received.
          if result.GetKind() == Result.TEST:
!             # Remember how many tests we have processed.
!             self.__num_tests += 1
!             # Increment the outcome count.
!             outcome = result.GetOutcome()
!             self.__outcome_counts[outcome] += 1
!             # Remember tests with unexpected results so that we can
!             # display them at the end of the run.
!             test_id = result.GetId()
!             expected_outcome = self.expected_outcomes.get(result.GetId(),
!                                                           Result.PASS)
!             if outcome != expected_outcome:
!                 self.__unexpected_outcome_counts[outcome] += 1
!                 self.__unexpected_test_results.append(result)
          else:
!             if result.GetOutcome() != result.PASS:
!                 self.__unexpected_resource_results.append(result)
!             
!         # In some modes, no results are displayed as the tests are run.
!         if self.format == "batch" or self.format == "stats":
              return
          
          # Display a heading before the first result.
          if self.__first_test:
              self._DisplayHeading("TEST RESULTS")
*************** class TextResultStream(FileResultStream)
*** 153,189 ****
          # Show results for tests with unexpected outcomes.
          if self.format in ("full", "brief", "batch"):
              compare_ids = lambda r1, r2: cmp(r1.GetId(), r2.GetId())
  
              # Sort test results by ID.
!             self.__test_results.sort(compare_ids)
              # Print individual test results.
              if self.expected_outcomes:
-                 # Show tests that produced unexpected outcomes.
-                 bad_results = split_results_by_expected_outcome(
-                     self.__test_results, self.expected_outcomes)[1]
                  self._DisplayHeading("TESTS WITH UNEXPECTED OUTCOMES")
!                 self._SummarizeResults(bad_results)
!             if not self.expected_outcomes or self.format == "full":
!                 # No expected outcomes were specified, so show all tests
!                 # that did not pass.
!                 bad_results = filter(
!                     lambda r: r.GetOutcome() != Result.PASS,
!                     self.__test_results)
!                 if bad_results:
!                     self._DisplayHeading("TESTS THAT DID NOT PASS")
!                     self._SummarizeResults(bad_results)
! 
!             # Sort resource results by ID.
!             self.__resource_results.sort(compare_ids)
!             bad_results = filter(
!                 lambda r: r.GetOutcome() != Result.PASS,
!                 self.__resource_results)
!             if len(bad_results) > 0:
                  # Print individual resource results.
                  self._DisplayHeading("RESOURCES THAT DID NOT PASS")
!                 self._SummarizeResults(bad_results)
  
          if self.format != "batch":
              self._DisplayStatistics()
          
          # Invoke the base class method.
--- 178,201 ----
          # Show results for tests with unexpected outcomes.
          if self.format in ("full", "brief", "batch"):
              compare_ids = lambda r1, r2: cmp(r1.GetId(), r2.GetId())
  
              # Sort test results by ID.
!             self.__unexpected_test_results.sort(compare_ids)
              # Print individual test results.
              if self.expected_outcomes:
                  self._DisplayHeading("TESTS WITH UNEXPECTED OUTCOMES")
!             else:
!                 self._DisplayHeading("TESTS THAT DID NOT PASS")
!             self._SummarizeResults(self.__unexpected_test_results)
! 
!             if self.__unexpected_resource_results:
!                 # Sort resource results by ID.
!                 self.__unexpected_resource_results.sort(compare_ids)
                  # Print individual resource results.
                  self._DisplayHeading("RESOURCES THAT DID NOT PASS")
!                 self._SummarizeResults(self.__unexpected_resource_results)
  
          if self.format != "batch":
              self._DisplayStatistics()
          
          # Invoke the base class method.
*************** class TextResultStream(FileResultStream)
*** 198,315 ****
          self.file.write("\n")
          self._DisplayHeading("STATISTICS")
  
          # Summarize the test statistics.
          if self.expected_outcomes:
!             self._SummarizeRelativeTestStats(self.__test_results)
          else:
!             self._SummarizeTestStats(self.__test_results)
! 
!         # Summarize test results by test suite.
!         if self.format in ("full", "stats") \
!            and len(self.suite_ids) > 0:
!             # Print statistics by test suite.
!             self._DisplayHeading("STATISTICS BY TEST SUITE")
!             self._SummarizeTestSuiteStats()
  
-         
-     def _SummarizeTestStats(self, results):
-         """Generate statistics about the overall results.
  
!         'results' -- The sequence of 'Result' objects to summarize."""
  
!         num_tests = len(results)
          self.file.write("  %6d        tests total\n" % num_tests)
  
          # If there are no tests, there is no need to go into detail.
          if num_tests == 0:
              return
  
-         counts_by_outcome = self._CountOutcomes(results)
          for outcome in Result.outcomes:
!             count = counts_by_outcome[outcome]
              if count > 0:
                  self.file.write("  %6d (%3.0f%%) tests %s\n"
                                  % (count, (100. * count) / num_tests,
                                     outcome))
          self.file.write("\n")
  
          
!     def _SummarizeRelativeTestStats(self, results):
!         """Generate statistics showing results relative to expectations.
! 
!         'results' -- The sequence of 'Result' objects to summarize."""
  
          # Indicate the total number of tests.
!         num_tests = len(results)
          self.file.write("  %6d        tests total\n" % num_tests)
  
          # If there are no tests, there is no need to go into detail.
          if num_tests == 0:
              return
  
-         # Split the results into those that produced expected outcomes, and
-         # those that didn't.
-         expected, unexpected = \
-             split_results_by_expected_outcome(results,
-                                               self.expected_outcomes)
          # Report the number that produced expected outcomes.
          self.file.write("  %6d (%3.0f%%) tests as expected\n"
!                         % (len(expected),
!                            (100. * len(expected)) / num_tests))
          # For results that produced unexpected outcomes, break them down by
          # actual outcome.
-         counts_by_outcome = self._CountOutcomes(unexpected)
          for outcome in Result.outcomes:
!             count = counts_by_outcome[outcome]
              if count > 0:
                  self.file.write("  %6d (%3.0f%%) tests unexpected %s\n"
                                  % (count, (100. * count) / num_tests,
                                     outcome))
          self.file.write("\n")
  
  
-     def _CountOutcomes(self, results):
-         """Count results by outcome.
- 
-         'results' -- A sequence of 'Result' objects.
- 
-         returns -- A map from outcomes to counts of results with that
-         outcome.""" 
- 
-         counts = {}
-         for outcome in Result.outcomes:
-             counts[outcome] = 0
-         for result in results:
-             outcome = result.GetOutcome()
-             counts[outcome] = counts[outcome] + 1
-         return counts
-         
-         
-     def _SummarizeTestSuiteStats(self):
-         """Generate statistics showing results by test suite."""
- 
-         for suite_id in self.suite_ids:
-             # Expand the contents of the suite.
-             suite = self.database.GetSuite(suite_id)
-             ids_in_suite = suite.GetAllTestAndSuiteIds()[0]
-             # Determine the results belonging to tests in the suite.
-             results_in_suite = []
-             for result in self.__test_results:
-                 if result.GetId() in ids_in_suite:
-                     results_in_suite.append(result)
-             # If there aren't any, skip.
-             if len(results_in_suite) == 0:
-                 continue
- 
-             self.file.write("  %s\n" % suite.GetId())
-             if self.expected_outcomes is None:
-                 self._SummarizeTestStats(results_in_suite)
-             else:
-                 self._SummarizeRelativeTestStats(results_in_suite)
- 
-         
      def _SummarizeResults(self, results):
          """Summarize each of the results.
  
          'results' -- The sequence of 'Result' objects to summarize."""
  
--- 210,271 ----
          self.file.write("\n")
          self._DisplayHeading("STATISTICS")
  
          # Summarize the test statistics.
          if self.expected_outcomes:
!             self._SummarizeRelativeTestStats()
          else:
!             self._SummarizeTestStats()
  
  
!     def _SummarizeTestStats(self):
!         """Generate statistics about the overall results."""
  
!         num_tests = self.__num_tests
          self.file.write("  %6d        tests total\n" % num_tests)
  
          # If there are no tests, there is no need to go into detail.
          if num_tests == 0:
              return
  
          for outcome in Result.outcomes:
!             count = self.__outcome_counts[outcome]
              if count > 0:
                  self.file.write("  %6d (%3.0f%%) tests %s\n"
                                  % (count, (100. * count) / num_tests,
                                     outcome))
          self.file.write("\n")
  
          
!     def _SummarizeRelativeTestStats(self):
!         """Generate statistics showing results relative to expectations."""
  
          # Indicate the total number of tests.
!         num_tests = self.__num_tests
          self.file.write("  %6d        tests total\n" % num_tests)
  
          # If there are no tests, there is no need to go into detail.
          if num_tests == 0:
              return
  
          # Report the number that produced expected outcomes.
+         unexpected_count = len(self.__unexpected_test_results)
+         expected_count = num_tests - unexpected_count
          self.file.write("  %6d (%3.0f%%) tests as expected\n"
!                         % (expected_count,
!                            (100. * expected_count) / num_tests))
          # For results that produced unexpected outcomes, break them down by
          # actual outcome.
          for outcome in Result.outcomes:
!             count = self.__unexpected_outcome_counts[outcome]
              if count > 0:
                  self.file.write("  %6d (%3.0f%%) tests unexpected %s\n"
                                  % (count, (100. * count) / num_tests,
                                     outcome))
          self.file.write("\n")
  
  
      def _SummarizeResults(self, results):
          """Summarize each of the results.
  
          'results' -- The sequence of 'Result' objects to summarize."""
  
