````diff
@@ -35,15 +35,21 @@ cd ${sources_dir}/scientific-computing-use-cases
 with `${path_to_output_dir}` pointing to the file system that should be tested.
 
-## Modifying the timeout
+## Modifying the timeout (removed)
 
-We can use an env var `${PYTEST_TIMEOUT_PER_TEST}` to set the maximal number of
-seconds a single test is allowed to take. Default is 5 seconds. To allow for,
-e.g., up to 10 seconds per test, run the tests with
+*Timeouts are buggy in pytest. The use of `pytest.mark.timeout` has been
+removed.*
+
+## Including the large tests
+
+Setting the environment variable `PYTEST_LARGE_TESTS` to `TRUE` will include
+tests up to arrays of `256^4` double-precision numbers. This will amount to
+about 32 GiB (34 GB) written to and read from disk:
 
 ```bash
 cd ${sources_dir}/scientific-computing-use-cases
-PYTEST_TIMEOUT_PER_TEST=10 ./run.sh ${path_to_output_dir}
+PYTEST_LARGE_TESTS=TRUE ./run.sh ${path_to_output_dir}
 ```
````
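For reference, the size claim in the new README section checks out: `256^4` double-precision values occupy exactly 32 GiB, or about 34.4 GB. A quick back-of-the-envelope verification in Python:

```python
# Largest test case: 256^4 double-precision (8-byte) values,
# written to disk once and read back once.
n_values = 256 ** 4       # 4_294_967_296 values
n_bytes = n_values * 8    # 34_359_738_368 bytes
print(n_bytes / 2 ** 30)  # 32.0 (GiB)
print(n_bytes / 10 ** 9)  # ~34.36 (GB)
```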
```diff
@@ -13,7 +13,15 @@ source ${mc3_target_dir}/bin/activate py3_std || \
     echo "Please run set_up_environment.sh first.";
     exit; }
 
+# If we're running the large tests, allow for 32 GB of virtual memory
+# instead of 8 GB
+if [ "${PYTEST_LARGE_TESTS}" == "TRUE" ]; then
+    vlim=32
+else
+    vlim=8
+fi
+ulimit -v "$(expr ${vlim} \* 1000 \* 1000)"
+
 # run all tests using a local tmp dir and monitor
 # timing of all tests
-ulimit -v "$(expr 8 \* 1000 \* 1000)"
 pytest -v -s --basetemp=${tmp_dir} --durations=0 tests/
```
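Note that bash's `ulimit -v` takes its argument in 1024-byte blocks, so `expr 32 \* 1000 \* 1000` requests 32,000,000 KiB, i.e. about 32.8 GB (30.5 GiB) of virtual memory. A rough Python equivalent of that line, sketched with the standard `resource` module (the `vlim` value mirrors run.sh; nothing here is code from the repository):

```python
import resource

# 32 for the large tests, 8 otherwise, as in run.sh.
vlim = 32

# ulimit -v counts 1024-byte blocks; convert to bytes for setrlimit.
limit_bytes = vlim * 1000 * 1000 * 1024

# Cap the address space (virtual memory) of the current process.
resource.setrlimit(resource.RLIMIT_AS, (limit_bytes, limit_bytes))
```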
```diff
@@ -11,13 +11,9 @@ import os
 
 np.random.seed(137)
 
-# read timeout from environment
-def _get_timeout():
-    if "PYTEST_TIMEOUT_PER_TEST" in os.environ:
-        pytest_timeout_per_test = float(os.environ["PYTEST_TIMEOUT_PER_TEST"])
-    else:
-        pytest_timeout_per_test = 5.0  # seconds
-    return pytest_timeout_per_test
+# do we want to run the large tests?
+def _run_large_tests():
+    return (os.environ.get("PYTEST_LARGE_TESTS", "false").upper() == "TRUE")
 
 def _get_xr_data_array(array):
```
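The new helper only treats the exact word `TRUE` (case-insensitively) as true; values such as `1` or `yes` leave the large tests disabled, as does an unset variable. A standalone sketch of that behaviour, duplicating the helper for illustration:

```python
import os

def _run_large_tests():
    return os.environ.get("PYTEST_LARGE_TESTS", "false").upper() == "TRUE"

# "TRUE" and "true" enable the large tests; "1", "yes", and unset do not.
for value in ("TRUE", "true", "1", "yes", None):
    if value is None:
        os.environ.pop("PYTEST_LARGE_TESTS", None)
    else:
        os.environ["PYTEST_LARGE_TESTS"] = value
    print(repr(value), "->", _run_large_tests())
```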
```diff
@@ -33,20 +29,19 @@ def temp_dir(tmpdir_factory):
     temp_dir.remove()
 
-@pytest.mark.parametrize(
-    "ndim",
-    [3,
-     pytest.mark.xfail(reason="Expect out-of-memory errors")(4)])
+@pytest.mark.parametrize("ndim", [3, 4])
 @pytest.mark.parametrize(
     "N",
-    list(2 ** n for n in range(5, 6)) +
-    list(pytest.mark.xfail(reason="Expect out-of-memory errors")(2 ** n)
-         for n in range(6, 8)))
+    list(2 ** n for n in range(5, 8)) +
+    list(pytest.mark.xfail(reason="Expect out-of-memory errors")(
+        pytest.mark.skipif(
+            not _run_large_tests(),
+            reason="User did not ask for the large tests")(2 ** n))
+        for n in [8]))
 @pytest.mark.parametrize("num_chunks", [2, 8])
 @pytest.mark.parametrize("use_zarr_or_nc4", ["zarr", "nc4"])
 @pytest.mark.parametrize("only_write", [False, True])
 @pytest.mark.parametrize("threads_per_worker", [4, 1])
-@pytest.mark.timeout(_get_timeout())
 def test_large_arrays(num_chunks, N, ndim, temp_dir, use_zarr_or_nc4,
                       only_write, threads_per_worker):
     """Write random data sets to disk and re-read check for corrupted data.
```