[PATCH] Misc. fixes for DejaGNU and TET emulation

Nathaniel Smith njs at pobox.com
Mon Mar 15 21:09:07 UTC 2004


- Fixes a number of bugs in DejaGNU and TET emulation.
- Improves error reporting for resource extensions that fail to load.
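
The most significant semantic change is the mapping from DejaGNU
outcomes to TET result codes.  A condensed view of the corrected table
(the authoritative version is in the tet_stream.py hunk below):

    # DejaGNU outcome -> TET result code, as now emitted by TETStream.
    DEJAGNU_TO_TET = {
        "PASS":        "PASS",
        "XPASS":       "PASS",
        "FAIL":        "FAIL",
        "XFAIL":       "FAIL",
        "UNTESTED":    "UNTESTED",
        "UNRESOLVED":  "UNRESOLVED",
        # DejaGNU's UNSUPPORTED means the test is meaningless in this
        # environment, which is TET's UNTESTED; TET's UNSUPPORTED is
        # closer to a FAIL for optional-feature checks.
        "UNSUPPORTED": "UNTESTED",
    }

DejaGNU's WARNING and ERROR are not test outcomes in TET terms; they
are now written as 520 "test case information" lines rather than 220
result lines.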

-- Nathaniel

-- 
"But suppose I am not willing to claim that.  For in fact pianos
are heavy, and very few persons can carry a piano all by themselves."
-------------- next part --------------
Index: ChangeLog
===================================================================
RCS file: /home/qm/Repository/qm/ChangeLog,v
retrieving revision 1.613
diff -u -r1.613 ChangeLog
--- ChangeLog	3 Mar 2004 07:12:12 -0000	1.613
+++ ChangeLog	15 Mar 2004 21:02:45 -0000
@@ -1,3 +1,29 @@
+2004-03-15  Nathaniel Smith  <njs at codesourcery.com>
+
+	* qm/test/base.py (CouldNotLoadExtensionError.__init__): Save
+	'exc_info' value for later use.
+	* qm/test/target.py (qm.test.base): Import it.
+	(Target._SetUpResource): Check for
+	'CouldNotLoadExtensionError'.
+
+	* qm/test/classes/dg_test.py (DGTest._RunDGTest): Clarify
+	comment.
+	Convert to pass tool arguments as a list, not a string.
+	Support 'dg-excess-errors'.
+	(DGTest._RunTool): Convert docstring to describe new argument
+	passing conventions.
+	(DGTest._DGoptions): Convert options immediately from string to
+	list.
+	(DGTest._DGexcess_errors): New method.
+	
+	* qm/test/classes/tet_stream.py (TETStream): Add detailed
+	description of TET/LSB and DejaGNU result codes.  Fix mapping
+	between them.
+	
+	* qm/test/classes/dejagnu_test.py
+	(DejaGNUTest.TargetExecutable._StdinPipe): Don't override.
+	(DejaGNUTest.BuildExecutable._StdinPipe): Don't override.
+
 2004-03-02  Mark Mitchell  <mark at codesourcery.com>
 
 	* qmdist/command/install_scripts.py (install_scripts.run): Handle
Index: qm/test/base.py
===================================================================
RCS file: /home/qm/Repository/qm/qm/test/base.py,v
retrieving revision 1.96
diff -u -r1.96 base.py
--- qm/test/base.py	3 Jan 2004 04:02:59 -0000	1.96
+++ qm/test/base.py	15 Mar 2004 21:02:45 -0000
@@ -47,6 +47,7 @@
 
         'exc_info' -- An exception tuple, as returned by 'sys.exc_info'."""
         
+        self.exc_info = exc_info
         message = qm.common.format_exception(exc_info)
         message += "\n" + qm.error("could not load extension class",
                                    class_name = class_name)
Index: qm/test/target.py
===================================================================
RCS file: /home/qm/Repository/qm/qm/test/target.py,v
retrieving revision 1.24
diff -u -r1.24 target.py
--- qm/test/target.py	20 Feb 2004 10:08:52 -0000	1.24
+++ qm/test/target.py	15 Mar 2004 21:02:45 -0000
@@ -21,6 +21,7 @@
 import qm.common
 import qm.extension
 import qm.platform
+import qm.test.base
 from   qm.test.context import *
 from   qm.test.result import *
 from   qm.test.database import NoSuchResourceError
@@ -390,6 +391,9 @@
             result.NoteException(cause="Resource is missing from the database.")
             self._RecordResult(result)
             return (None, result, None)
+        except qm.test.base.CouldNotLoadExtensionError, e:
+            result.NoteException(e.exc_info,
+                                 cause = "Could not load extension class")
         except KeyboardInterrupt:
             result.NoteException()
             # We received a KeyboardInterrupt, indicating that the
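
The two hunks above work together: base.py saves the original
'sys.exc_info()' tuple on the exception, and target.py forwards it to
'Result.NoteException' so the user sees the real traceback instead of a
generic failure.  A minimal self-contained sketch of the pattern, where
'load_class' is a hypothetical stand-in for the real extension loader:

    import sys

    class CouldNotLoadExtensionError(Exception):
        def __init__(self, class_name, exc_info):
            # Keep the original traceback for later re-reporting.
            self.exc_info = exc_info
            Exception.__init__(self, "could not load %s" % class_name)

    def load_class(class_name):
        try:
            return __import__(class_name)
        except:
            raise CouldNotLoadExtensionError(class_name, sys.exc_info())
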
Index: qm/test/classes/dejagnu_test.py
===================================================================
RCS file: /home/qm/Repository/qm/qm/test/classes/dejagnu_test.py,v
retrieving revision 1.3
diff -u -r1.3 dejagnu_test.py
--- qm/test/classes/dejagnu_test.py	4 Jun 2003 15:57:59 -0000	1.3
+++ qm/test/classes/dejagnu_test.py	15 Mar 2004 21:02:45 -0000
@@ -88,12 +88,6 @@
         Classes derived from 'DejaGNUTest' may provide derived
         versions of this class."""
 
-        def _StdinPipe(self):
-
-            # No input is provided to the program.
-            return None
-
-        
         def _StderrPipe(self):
 
             # Combine stdout/stderr into a single stream.
@@ -106,12 +100,6 @@
         Classes derived from 'DejaGNUTest' may provide derived
         versions of this class."""
 
-        def _StdinPipe(self):
-
-            # No input is provided to the program.
-            return None
-
-        
         def _StderrPipe(self):
 
             # Combine stdout/stderr into a single stream.
Index: qm/test/classes/dg_test.py
===================================================================
RCS file: /home/qm/Repository/qm/qm/test/classes/dg_test.py,v
retrieving revision 1.4
diff -u -r1.4 dg_test.py
--- qm/test/classes/dg_test.py	3 Mar 2004 05:42:54 -0000	1.4
+++ qm/test/classes/dg_test.py	15 Mar 2004 21:02:45 -0000
@@ -87,13 +87,13 @@
                    keep_output = 0):
         """Run a 'dg' test.
 
-        'tool_flags' -- A string giving a set of options to be
+        'tool_flags' -- A list of strings giving a set of options to be
         provided to the tool being tested.
         
-        'default_options' -- A string giving a default set of options
-        to be provided to the tool being tested.  These options can be
-        overridden by an embedded 'dg-options' command in the test
-        itself.
+        'default_options' -- A list of strings giving a default set of
+        options to be provided to the tool being tested.  These options
+        can be overridden by an embedded 'dg-options' command in the
+        test itself.
         
         'context' -- The 'Context' in which this test is running.
 
@@ -116,8 +116,9 @@
         self._kind = default_kind
         self._selected = None
         self._expectation = None
-        self._options = default_options
+        self._options = list(default_options)
         self._diagnostics = []
+        self._excess_errors_expected = False
         self._final_commands = []
         # Iterate through the test looking for embedded commands.
         line_num = 0
@@ -127,7 +128,7 @@
         if path.startswith(root):
             self._name = path[len(root) + 1:]
         else:
-            # We prepend "./" for compatibility with DejaGNU.
+            # We prepend "./" for output compatibility with DejaGNU.
             self._name = os.path.join(".", os.path.basename(path))
         for l in open(path).xreadlines():
             line_num += 1
@@ -147,7 +148,7 @@
 
         # Run the tool being tested.
         output, file = self._RunTool(path, self._kind,
-                                     tool_flags + " " + self._options,
+                                     tool_flags + self._options,
                                      context,
                                      result)
 
@@ -191,11 +192,17 @@
         output = re.sub(r"\n+", "", output)
         # If there's any output left, the test fails.
         message = self._name + " (test for excess errors)"
+        if self._excess_errors_expected:
+            expected = self.FAIL
+        else:
+            expected = self.PASS
         if output != "":
-            self._RecordDejaGNUOutcome(result, self.FAIL, message)
+            self._RecordDejaGNUOutcome(result, self.FAIL,
+                                       message, expected)
             result["DGTest.excess_errors"] = "<pre>" + output + "</pre>"
         else:
-            self._RecordDejaGNUOutcome(result, self.PASS, message)
+            self._RecordDejaGNUOutcome(result, self.PASS,
+                                       message, expected)
 
         # Run the generated program.
         if self._kind == "run":
@@ -258,8 +265,8 @@
         
         'kind' -- The kind of test to perform.
 
-        'options' -- A string giving command-line options to provide
-        to the tool.
+        'options' -- A list of strings giving command-line options to
+        provide to the tool.
 
         'context' -- The 'Context' for the test execution.
 
@@ -338,11 +345,11 @@
         if len(args) >= 2:
             code = self._ParseTargetSelector(args[1], context)
             if code == "S":
-                self._options = args[0]
+                self._options = self._ParseTclWords(args[0])
             elif code != "N":
                 self._Error("'dg-options': 'xfail' not allowed here")
         else:
-            self._options = args[0]
+            self._options = self._ParseTclWords(args[0])
 
 
     def _DGbogus(self, line_num, args, context):
@@ -382,6 +389,27 @@
         'context' -- The 'Context' in which the test is running."""
 
         self.__ExpectDiagnostic(self.__DIAG_ERROR, line_num, args, context)
+
+
+    def _DGexcess_errors(self, line_num, args, context):
+        """Emulate the 'dg-excess-errors' command.
+
+        'line_num' -- The line number at which the command was found.
+
+        'args' -- The arguments to the command, as a list of
+        strings.
+
+        'context' -- The 'Context' in which the test is running."""
+
+        if len(args) > 2:
+            self._Error("'dg-excess-errors': too many arguments")
+
+        if len(args) >= 2:
+            code = self._ParseTargetSelector(args[1], context)
+            if code in ("F", "S"):
+                self._excess_errors_expected = True
+        else:
+            self._excess_errors_expected = True
 
 
     def __ExpectDiagnostic(self, kind, line_num, args, context):
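
On the dg_test.py changes: 'dg-options' arguments are now split into a
list with '_ParseTclWords' instead of being pasted into a single string,
and the new '_DGexcess_errors' method implements the standard
'dg-excess-errors' directive.  The decision logic, distilled into a
hypothetical standalone function:

    def excess_errors_expected(args, selector_code):
        """Return true if leftover tool output is an expected failure.

        'args' -- The 'dg-excess-errors' arguments, e.g.
        ['cascading diagnostics', 'xfail *-*-*'].

        'selector_code' -- 'F' (xfail), 'S' (selected), or 'N' (not
        selected), as returned by '_ParseTargetSelector'."""

        if len(args) < 2:
            # Unconditional form: { dg-excess-errors "comment" }
            return True
        return selector_code in ("F", "S")
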
Index: qm/test/classes/tet_stream.py
===================================================================
RCS file: /home/qm/Repository/qm/qm/test/classes/tet_stream.py,v
retrieving revision 1.2
diff -u -r1.2 tet_stream.py
--- qm/test/classes/tet_stream.py	20 Feb 2004 10:08:52 -0000	1.2
+++ qm/test/classes/tet_stream.py	15 Mar 2004 21:02:45 -0000
@@ -30,24 +30,102 @@
 
 class TETStream(FileResultStream):
     """A 'TETStream' formats results as a TET journal.
-
-    Provides special handling for 'DejaGNUTest' results.
     
+    Provides special handling for 'DejaGNUTest' results.
+
     TET: http://tetworks.opengroup.org/
     TET journal format: see appendix C and D of
        http://tetworks.opengroup.org/documents/3.7/uguide.pdf
 
-    """
+    For the meaning of TET result codes, we use as guidelines the LSB
+    test FAQ, question Q1.11:
+        * PASS - a test result belonging to this group is considered to
+          be a pass for compliance testing purposes:
+              o Pass - the test has been executed correctly and to
+                completion without any kind of problem
+              o Warning - the functionality is acceptable, but you
+                should be aware that later revisions of the relevant
+                standards or specification may change the requirements
+                in this area.
+              o FIP - additional information is provided which needs to
+                be checked manually.
+              o Unsupported - an optional feature is not available or
+                not supported in the implementation under test.
+              o Not in Use - some tests may not be required in certain
+                test modes; or, when an interface can be implemented by
+                a macro or function and there are two versions of the
+                test, only one is used.
+              o Untested - no test written to check a particular feature
+                or an optional facility needed to perform a test is not
+                available on the system.
+          [There are also "notimp" and "unapproved" cases mentioned in
+          the LSB-FHS README, but they are otherwise undocumented, and
+          don't correspond to any DejaGNU or QMTest outcomes anyway.]
+        * FAIL - a test result belonging to this group is considered to
+          be a fail for compliance testing purposes (unless the failure
+          has been waived by an agreed Problem Report in the
+          Certification Problem Reporting database):
+              o Fail - the interface did not behave as expected.
+              o Uninitiated - the particular test in question did not
+                start to execute.
+              o Unresolved - the test started but did not reach the
+                point where the test was able to report success or
+                failure.
+              o Unreported - a major error occurred during the testset
+                execution.  (The TET manual calls this NORESULT.)
+    (From http://www.linuxbase.org/test/lsb-runtime-test-faq.html )
+    
+    DejaGNU test results are described as:
+        * PASS - A test has succeeded.
+        * FAIL - A test has produced the bug it was intended to
+          capture.
+        * WARNING - Declares detection of a minor error in the test case
+          itself.  Use WARNING rather than ERROR for cases (such as
+          communication failure to be followed by a retry) where the
+          test case can recover from the error.  Note that sufficient
+          warnings will cause a test to go from PASS/FAIL to
+          UNRESOLVED.
+        * ERROR - Declares a severe error in the testing framework
+          itself.  An ERROR also causes a test to go from PASS/FAIL to
+          UNRESOLVED.
+        * UNRESOLVED - A test produced indeterminate results.  Usually,
+          this means the test executed in an unexpected fashion; this
+          outcome requires that a human being go over results, to
+          determine if the test should have passed or failed.  This
+          message is also used for any test that requires human
+          intervention because it is beyond the abilities of the testing
+          framework.  Any unresolved test should be resolved to PASS or
+          FAIL before a test run can be considered finished.
+
+          Examples:
+              - a test's execution is interrupted
+              - a test does not produce a clear result (because of
+                WARNING or ERROR messages)
+              - a test depends on a previous test case which failed
+        * UNTESTED - a test case that isn't run for some technical
+          reason.  (E.g., a dummy test created as a placeholder for a
+          test that is not yet written.)
+        * UNSUPPORTED - Declares that a test case depends on some
+          facility that does not exist in the testing environment; the
+          test is simply meaningless.
+    (From a combination of DejaGNU manual sections "Core Internal
+    Procedures", "C Unit Testing API", and "A POSIX conforming test
+    framework".)
 
+    """
+    
     # TET result codes:
     PASS = (0, "PASS")
-    FAIL = (1, "FAIL")
-    UNRESOLVED = (2, "UNRESOLVED")
-    NOTINUSE = (3, "NOTINUSE")
+    WARNING = (101, "WARNING")
+    FIP = (102, "FIP")
     UNSUPPORTED = (4, "UNSUPPORTED")
+    NOTINUSE = (3, "NOTINUSE")
     UNTESTED = (5, "UNTESTED")
+
+    FAIL = (1, "FAIL")
     UNINITIATED = (6, "UNINITIATED")
-    NORESULT = (7, "NORESULT")
+    UNRESOLVED = (2, "UNRESOLVED")
+    UNREPORTED = (7, "UNREPORTED")
 
 
     def __init__(self, arguments):
@@ -238,25 +316,6 @@
                             "%i %i %s"
                             % (self._tcc_number, purpose, start_time),
                             "")
-            outcome_num, outcome_name \
-                         = { DejaGNUTest.PASS: self.PASS,
-                             DejaGNUTest.XPASS: self.PASS,
-                             DejaGNUTest.FAIL: self.FAIL,
-                             DejaGNUTest.XFAIL: self.FAIL,
-                             DejaGNUTest.WARNING: self.NORESULT,
-                             DejaGNUTest.ERROR: self.NORESULT,
-                             DejaGNUTest.UNTESTED: self.UNTESTED,
-                             DejaGNUTest.UNRESOLVED: self.UNRESOLVED,
-                             DejaGNUTest.UNSUPPORTED: self.UNSUPPORTED,
-                           }[outcome]
-            # Test purpose result
-            # 220 | activity_number tp_number result time | result-name
-            data = "%i %i %i %s" % (self._tcc_number,
-                                    purpose,
-                                    outcome_num,
-                                    end_time)
-            self._WriteLine(220, data, outcome_name)
-            
             if outcome == DejaGNUTest.WARNING:
                 # Test case information
                 # 520 | activity_num tp_num context block sequence | text
@@ -265,16 +324,37 @@
                                 "%i %i 0 1 1" % (self._tcc_number,
                                                  purpose),
                                 "WARNING")
-            if outcome == DejaGNUTest.ERROR:
-                # Test case controller message
-                # 50 || text describing problem
+            elif outcome == DejaGNUTest.ERROR:
+                # Test case information
+                # 520 | activity_num tp_num context block sequence | text
                 # (see _WriteResultAnnotations for details)
                 self._WriteLine(520,
                                 "%i %i 0 1 1" % (self._tcc_number,
                                                  purpose),
                                 "ERROR")
+            else:
+                outcome_num, outcome_name \
+                    = { DejaGNUTest.PASS: self.PASS,
+                        DejaGNUTest.XPASS: self.PASS,
+                        DejaGNUTest.FAIL: self.FAIL,
+                        DejaGNUTest.XFAIL: self.FAIL,
+                        DejaGNUTest.UNTESTED: self.UNTESTED,
+                        DejaGNUTest.UNRESOLVED: self.UNRESOLVED,
+                        # TET's UNSUPPORTED is like a FAIL for tests
+                        # that check for optional features; UNTESTED is
+                        # the correct correspondent for DejaGNU's
+                        # UNSUPPORTED.
+                        DejaGNUTest.UNSUPPORTED: self.UNTESTED,
+                        }[outcome]
+                # Test purpose result
+                # 220 | activity_number tp_number result time | result-name
+                data = "%i %i %i %s" % (self._tcc_number,
+                                        purpose,
+                                        outcome_num,
+                                        end_time)
+                self._WriteLine(220, data, outcome_name)
 
-            purpose += 1
+                purpose += 1
             
         # Test case end
         # 80 | activity_number completion_status time | text
@@ -297,8 +377,8 @@
 
         outcome_num, outcome_name = { Result.FAIL: self.FAIL,
                                       Result.PASS: self.PASS,
-                                      Result.UNTESTED: self.UNTESTED,
-                                      Result.ERROR: self.NORESULT,
+                                      Result.UNTESTED: self.UNINITIATED,
+                                      Result.ERROR: self.UNREPORTED,
                                     }[result.GetOutcome()]
         # Test purpose result
         # 220 | activity_number tp_number result time | result-name
@@ -307,8 +387,8 @@
         self._WriteLine(220, data, outcome_name)
 
         if result.GetOutcome() == Result.ERROR:
-            # Test case controller message
-            # 50 || text describing problem
+            # Test case information
+            # 520 | activity_num tp_num context block sequence | text
             # (see _WriteResultAnnotations for details)
             self._WriteLine(520,
                             "%i 0 0 1 1" % self._tcc_number,


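For anyone eyeballing journals, this is roughly the 220 "test purpose
result" line the stream now writes, reconstructed from the format
comments above (the field values here are made up):

    # 220 | activity_number tp_number result time | result-name
    tcc_number, purpose, end_time = 3, 1, "21:02:45"
    code, name = (5, "UNTESTED")    # e.g. a DejaGNU UNSUPPORTED result
    print "220|%i %i %i %s|%s" % (tcc_number, purpose, code, end_time, name)
    # prints: 220|3 1 5 21:02:45|UNTESTED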