Skip to content

Commit

Permalink
enable appveyor testing of cwltest (#72)
Browse files Browse the repository at this point in the history
* enable appveyor testing of cwltest
* decrease default timeout
* include mock-cwl-runner in appveyor
* always upload the test results
* enable pip cache preservation
* make tests windows compatible
* add system python to path
* add test for path normalization
* Fix the bug proven by the new test
* speed up appveyor builds by removing pip, virtualenv installs & drop py-32bit versions
  • Loading branch information
mr-c authored May 12, 2018
1 parent 6bffd59 commit 6226d05
Show file tree
Hide file tree
Showing 6 changed files with 173 additions and 74 deletions.
45 changes: 45 additions & 0 deletions appveyor.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# AppVeyor CI configuration for cwltest (Windows builds).
version: .{build}-{branch}

# Preserve the pip download cache between builds to speed them up.
cache:
  - '%LOCALAPPDATA%\pip\Cache'

environment:
  SYSTEMROOT: "C:\\WINDOWS"

  # One build per pre-installed 64-bit Python interpreter.
  matrix:
    - PYTHON: "C:\\Python27-x64"
      PYTHON_VERSION: "2.7.x"
      PYTHON_ARCH: "64"

    - PYTHON: "C:\\Python34-x64"
      PYTHON_VERSION: "3.4.x"
      PYTHON_ARCH: "64"

    - PYTHON: "C:\\Python35-x64"
      PYTHON_VERSION: "3.5.x"
      PYTHON_ARCH: "64"

    - PYTHON: "C:\\Python36-x64"
      PYTHON_VERSION: "3.6.x"
      PYTHON_ARCH: "64"

install:
  # Put the selected interpreter (and its Scripts dir) first on PATH.
  - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"

build_script:
  # Install the test tooling and cwltest itself (editable install).
  - |
    %PYTHON%\\python.exe -m pip install -U wheel pytest pytest-xdist
    %PYTHON%\\python.exe -m pip install -e .

test_script:
  # Run the test suite in parallel (-n2), emitting JUnit XML for upload.
  - |
    %PYTHON%\\python.exe setup.py test --addopts "--verbose -p no:cacheprovider --junit-xml=tests.xml -n2"

on_finish:
  # Always upload the JUnit results so AppVeyor shows per-test status,
  # even when the test step failed.
  - ps: |
      $wc = New-Object 'System.Net.WebClient'
      $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($Env:APPVEYOR_JOB_ID)", (Resolve-Path .\tests.xml))

# Only build the master branch (pull requests are still built).
branches:
  only:
    - master
153 changes: 87 additions & 66 deletions cwltest/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,77 +7,87 @@
import json
import logging
import os
import pipes
import shutil
import sys
import tempfile
import threading
import time
from typing import Any, Dict, List, Optional, Text
from concurrent.futures import ThreadPoolExecutor

import ruamel.yaml as yaml
import ruamel.yaml.scanner as yamlscanner
import schema_salad.ref_resolver
from concurrent.futures import ThreadPoolExecutor
from six.moves import range
from six.moves import zip
from typing import Any, Dict, List
import pkg_resources # part of setuptools

import junit_xml
from cwltest.utils import compare, CompareFail, TestResult, REQUIRED, get_test_number_by_key
from cwltest.utils import (compare, CompareFail, TestResult, REQUIRED,
get_test_number_by_key)

_logger = logging.getLogger("cwltest")
_logger.addHandler(logging.StreamHandler())
_logger.setLevel(logging.INFO)

UNSUPPORTED_FEATURE = 33
DEFAULT_TIMEOUT = 900 # 15 minutes
DEFAULT_TIMEOUT = 600 # 10 minutes

if sys.version_info < (3, 0):
import subprocess32 as subprocess
from pipes import quote
else:
import subprocess
from shlex import quote

templock = threading.Lock()


def prepare_test_command(args, i, tests):
# type: (argparse.Namespace, int, List[Dict[str, str]]) -> List[str]
t = tests[i]
test_command = [args.tool]
test_command.extend(args.args)
def prepare_test_command(tool, # type: str
args, # type: List[str]
testargs, # type: Optional[List[str]]
test # type: Dict[str, str]
): # type: (...) -> List[str]
""" Turn the test into a command line. """
test_command = [tool]
test_command.extend(args)

# Add additional arguments given in test case
if args.testargs is not None:
for testarg in args.testargs:
if testargs is not None:
for testarg in testargs:
(test_case_name, prefix) = testarg.split('==')
if test_case_name in t:
test_command.extend([prefix, t[test_case_name]])
if test_case_name in test:
test_command.extend([prefix, test[test_case_name]])

# Add prefixes if running on MacOSX so that boot2docker writes to /Users
with templock:
if 'darwin' in sys.platform and args.tool == 'cwltool':
if 'darwin' in sys.platform and tool == 'cwltool':
outdir = tempfile.mkdtemp(prefix=os.path.abspath(os.path.curdir))
test_command.extend(["--tmp-outdir-prefix={}".format(outdir), "--tmpdir-prefix={}".format(outdir)])
test_command.extend(["--tmp-outdir-prefix={}".format(outdir),
"--tmpdir-prefix={}".format(outdir)])
else:
outdir = tempfile.mkdtemp()
test_command.extend(["--outdir={}".format(outdir),
"--quiet",
t["tool"]])
if t.get("job"):
test_command.append(t["job"])
os.path.normcase(test["tool"])])
if test.get("job"):
test_command.append(os.path.normcase(test["job"]))
return test_command


def run_test(args, i, tests, timeout):
# type: (argparse.Namespace, int, List[Dict[str, str]], int) -> TestResult
def run_test(args, # type: argparse.Namespace
test, # type: Dict[str, str]
test_number, # type: int
total_tests, # type: int
timeout # type: int
): # type: (...) -> TestResult

global templock

out = {} # type: Dict[str,Any]
outdir = outstr = outerr = test_command = None
outdir = outstr = outerr = None
test_command = [] # type: List[str]
duration = 0.0
t = tests[i]
prefix = ""
suffix = ""
if sys.stderr.isatty():
Expand All @@ -86,12 +96,18 @@ def run_test(args, i, tests, timeout):
suffix = "\n"
try:
process = None # type: subprocess.Popen
test_command = prepare_test_command(args, i, tests)

if t.get("short_name"):
sys.stderr.write("%sTest [%i/%i] %s: %s%s\n" % (prefix, i + 1, len(tests), t.get("short_name"), t.get("doc"), suffix))
test_command = prepare_test_command(
args.tool, args.args, args.testargs, test)

if test.get("short_name"):
sys.stderr.write(
"%sTest [%i/%i] %s: %s%s\n"
% (prefix, test_number, total_tests, test.get("short_name"),
test.get("doc"), suffix))
else:
sys.stderr.write("%sTest [%i/%i] %s%s\n" % (prefix, i + 1, len(tests), t.get("doc"), suffix))
sys.stderr.write(
"%sTest [%i/%i] %s%s\n"
% (prefix, test_number, total_tests, test.get("doc"), suffix))
sys.stderr.flush()

start_time = time.time()
Expand All @@ -104,38 +120,40 @@ def run_test(args, i, tests, timeout):
raise subprocess.CalledProcessError(return_code, " ".join(test_command))

out = json.loads(outstr)
except ValueError as v:
_logger.error(str(v))
except ValueError as err:
_logger.error(str(err))
_logger.error(outstr)
_logger.error(outerr)
except subprocess.CalledProcessError as err:
if err.returncode == UNSUPPORTED_FEATURE:
return TestResult(UNSUPPORTED_FEATURE, outstr, outerr, duration, args.classname)
elif t.get("should_fail", False):
if test.get("should_fail", False):
return TestResult(0, outstr, outerr, duration, args.classname)
else:
_logger.error(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
_logger.error(t.get("doc"))
_logger.error("Returned non-zero")
_logger.error(outerr)
return TestResult(1, outstr, outerr, duration, args.classname, str(err))
except (yamlscanner.ScannerError, TypeError) as e:
_logger.error(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
_logger.error(u"""Test failed: %s""", " ".join([quote(tc) for tc in test_command]))
_logger.error(test.get("doc"))
_logger.error(u"Returned non-zero")
_logger.error(outerr)
return TestResult(1, outstr, outerr, duration, args.classname, str(err))
except (yamlscanner.ScannerError, TypeError) as err:
_logger.error(u"""Test failed: %s""",
u" ".join([quote(tc) for tc in test_command]))
_logger.error(outstr)
_logger.error(u"Parse error %s", str(e))
_logger.error(u"Parse error %s", str(err))
_logger.error(outerr)
except KeyboardInterrupt:
_logger.error(u"""Test interrupted: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
_logger.error(u"""Test interrupted: %s""",
u" ".join([quote(tc) for tc in test_command]))
raise
except subprocess.TimeoutExpired:
_logger.error(u"""Test timed out: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
_logger.error(t.get("doc"))
_logger.error(u"""Test timed out: %s""",
u" ".join([quote(tc) for tc in test_command]))
_logger.error(test.get("doc"))
return TestResult(2, outstr, outerr, timeout, args.classname, "Test timed out")
finally:
if process is not None and process.returncode is None:
_logger.error(u"""Terminating lingering process""")
process.terminate()
for a in range(0, 3):
for _ in range(0, 3):
time.sleep(1)
if process.poll() is not None:
break
Expand All @@ -144,24 +162,25 @@ def run_test(args, i, tests, timeout):

fail_message = ''

if t.get("should_fail", False):
_logger.warning(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
_logger.warning(t.get("doc"))
if test.get("should_fail", False):
_logger.warning(u"""Test failed: %s""", u" ".join([quote(tc) for tc in test_command]))
_logger.warning(test.get("doc"))
_logger.warning(u"Returned zero but it should be non-zero")
return TestResult(1, outstr, outerr, duration, args.classname)

try:
compare(t.get("output"), out)
compare(test.get("output"), out)
except CompareFail as ex:
_logger.warning(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
_logger.warning(t.get("doc"))
_logger.warning(u"""Test failed: %s""", u" ".join([quote(tc) for tc in test_command]))
_logger.warning(test.get("doc"))
_logger.warning(u"Compare failure %s", ex)
fail_message = str(ex)

if outdir:
shutil.rmtree(outdir, True)

return TestResult((1 if fail_message else 0), outstr, outerr, duration, args.classname, fail_message)
return TestResult((1 if fail_message else 0), outstr, outerr, duration,
args.classname, fail_message)


def arg_parser(): # type: () -> argparse.ArgumentParser
Expand All @@ -175,17 +194,18 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
help="CWL runner executable to use (default 'cwl-runner'")
parser.add_argument("--only-tools", action="store_true", help="Only test CommandLineTools")
parser.add_argument("--junit-xml", type=str, default=None, help="Path to JUnit xml file")
parser.add_argument("--test-arg", type=str, help="Additional argument given in test cases and "
"required prefix for tool runner.",
metavar="cache==--cache-dir", action="append", dest="testargs")
parser.add_argument("--test-arg", type=str, help="Additional argument "
"given in test cases and required prefix for tool runner.",
default=None, metavar="cache==--cache-dir", action="append", dest="testargs")
parser.add_argument("args", help="arguments to pass first to tool runner", nargs=argparse.REMAINDER)
parser.add_argument("-j", type=int, default=1, help="Specifies the number of tests to run simultaneously "
"(defaults to one).")
parser.add_argument("--verbose", action="store_true", help="More verbose output during test run.")
parser.add_argument("--classname", type=str, default="", help="Specify classname for the Test Suite.")
parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help="Time of execution in seconds after "
"which the test will be skipped. "
"Defaults to 900 sec (15 minutes)")
parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT,
help="Time of execution in seconds after which the test will be "
"skipped. Defaults to {} seconds ({} minutes).".format(
DEFAULT_TIMEOUT, DEFAULT_TIMEOUT/60))

pkg = pkg_resources.require("cwltest")
if pkg:
Expand Down Expand Up @@ -256,14 +276,14 @@ def main(): # type: () -> int
if test_number:
ntest.append(test_number)
else:
_logger.error('Test with short name "%s" not found ' % s)
_logger.error('Test with short name "%s" not found ', s)
return 1
else:
ntest = list(range(0, len(tests)))

total = 0
with ThreadPoolExecutor(max_workers=args.j) as executor:
jobs = [executor.submit(run_test, args, i, tests, args.timeout)
jobs = [executor.submit(run_test, args, tests[i], i+1, len(tests), args.timeout)
for i in ntest]
try:
for i, job in zip(ntest, jobs):
Expand Down Expand Up @@ -294,18 +314,19 @@ def main(): # type: () -> int
_logger.error("Tests interrupted")

if args.junit_xml:
with open(args.junit_xml, 'w') as fp:
junit_xml.TestSuite.to_file(fp, [report])
with open(args.junit_xml, 'w') as xml:
junit_xml.TestSuite.to_file(xml, [report])

if failures == 0 and unsupported == 0:
_logger.info("All tests passed")
return 0
elif failures == 0 and unsupported > 0:
_logger.warning("%i tests passed, %i unsupported features", total - unsupported, unsupported)
if failures == 0 and unsupported > 0:
_logger.warning("%i tests passed, %i unsupported features",
total - unsupported, unsupported)
return 0
else:
_logger.warning("%i tests passed, %i failures, %i unsupported features", total - (failures + unsupported), failures, unsupported)
return 1
_logger.warning("%i tests passed, %i failures, %i unsupported features",
total - (failures + unsupported), failures, unsupported)
return 1


if __name__ == "__main__":
Expand Down
14 changes: 9 additions & 5 deletions cwltest/tests/test_categories.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import unittest

import os
from os import linesep as n

from .util import run_with_mock_cwl_runner, get_data
import xml.etree.ElementTree as ET
Expand All @@ -12,16 +13,19 @@ def test_unsupported_with_required_tests(self):
args = ["--test", get_data("tests/test-data/required-unsupported.yml")]
error_code, stdout, stderr = run_with_mock_cwl_runner(args)
self.assertEquals(error_code, 1)
self.assertEquals("Test [1/2] Required test that is unsupported (without tags)\n\n"
"Test [2/2] Required test that is unsupported (with tags)\n\n"
"0 tests passed, 2 failures, 0 unsupported features\n", stderr)
self.assertEquals(
"Test [1/2] Required test that is unsupported (without tags){n}{n}"
"Test [2/2] Required test that is unsupported (with tags){n}{n}"
"0 tests passed, 2 failures, 0 unsupported "
"features{n}".format(n=n), stderr)

def test_unsupported_with_optional_tests(self):
args = ["--test", get_data("tests/test-data/optional-unsupported.yml")]
error_code, stdout, stderr = run_with_mock_cwl_runner(args)
self.assertEquals(error_code, 0)
self.assertEquals("Test [1/1] Optional test that is unsupported\n\n"
"0 tests passed, 1 unsupported features\n", stderr)
self.assertEquals("Test [1/1] Optional test that is unsupported{n}{n}"
"0 tests passed, 1 unsupported "
"features{n}".format(n=n), stderr)

def test_error_with_optional_tests(self):
args = ["--test", get_data("tests/test-data/optional-error.yml")]
Expand Down
25 changes: 25 additions & 0 deletions cwltest/tests/test_prepare.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import os
import unittest
from cwltest import prepare_test_command


class TestPrepareCommand(unittest.TestCase):
    """Tests for prepare_test_command()."""

    def test_unix_relative_path(self):
        """Confirm unix style to windows style path corrections."""
        case = {
            'doc': 'General test of command line generation',
            'output': {'args': ['echo']},
            'tool': 'v1.0/bwa-mem-tool.cwl',
            'job': 'v1.0/bwa-mem-job.json',
            'tags': ['required'],
        }
        command = prepare_test_command(
            tool='cwl-runner', args=[], testargs=None, test=case)
        # On Windows the paths must come back with backslash separators;
        # elsewhere they are passed through unchanged.
        sep = '\\' if os.name == 'nt' else '/'
        self.assertEqual(command[3], sep.join(['v1.0', 'bwa-mem-tool.cwl']))
        self.assertEqual(command[4], sep.join(['v1.0', 'bwa-mem-job.json']))
Loading

0 comments on commit 6226d05

Please sign in to comment.