From 16fd3c9ebb2a267f7a13961a500628ee2db0f356 Mon Sep 17 00:00:00 2001
From: Kieran Spear
Date: Thu, 15 Nov 2012 14:40:25 +1100
Subject: [PATCH] Allow running tests individually

If there is a non-zero number of arguments, run_tests.sh now passes
the arguments through to `manage.py test`, skipping the default
horizon/openstack_dashboard suites. The correct settings module is
chosen based on the first module given in the argument, e.g.
horizon.test.tests.tables chooses the horizon.test.settings module.

Change-Id: I5321e87bec6831fb7574e045a82de06086b1d0d0
---
 doc/source/ref/run_tests.rst               | 19 ++++++++++++++++++
 doc/source/testing.rst                     |  2 ++
 openstack_dashboard/test/tests/__init__.py |  0
 run_tests.sh                               | 23 ++++++++++++++++++----
 4 files changed, 40 insertions(+), 4 deletions(-)
 create mode 100644 openstack_dashboard/test/tests/__init__.py

diff --git a/doc/source/ref/run_tests.rst b/doc/source/ref/run_tests.rst
index b74aa1c57..5bcc1d754 100644
--- a/doc/source/ref/run_tests.rst
+++ b/doc/source/ref/run_tests.rst
@@ -43,6 +43,25 @@ tests by using the ``--skip-selenium`` flag::
 This isn't recommended, but can be a timesaver when you only need to run the
 code tests and not the frontend tests during development.
 
+Running a subset of tests
+-------------------------
+
+Instead of running all tests, you can specify an individual directory, file,
+class, or method that contains test code.
+
+To run the tests in the ``horizon/test/tests/tables.py`` file::
+
+    ./run_tests.sh horizon.test.tests.tables
+
+To run the tests in the ``WorkflowsTests`` class in
+``horizon/test/tests/workflows.py``::
+
+    ./run_tests.sh horizon.test.tests.workflows:WorkflowsTests
+
+To run just the ``WorkflowsTests.test_workflow_view`` test method::
+
+    ./run_tests.sh horizon.test.tests.workflows:WorkflowsTests.test_workflow_view
+
 Using Dashboard and Panel Templates
 ===================================
 
diff --git a/doc/source/testing.rst b/doc/source/testing.rst
index b3b150382..d9362327e 100644
--- a/doc/source/testing.rst
+++ b/doc/source/testing.rst
@@ -19,6 +19,8 @@ before you submit any pull requests/patches. To run the tests::
 
     $ ./run_tests.sh
+
+It's also possible to :doc:`run a subset of unit tests <ref/run_tests>`.
 
 .. seealso::
diff --git a/openstack_dashboard/test/tests/__init__.py b/openstack_dashboard/test/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/run_tests.sh b/run_tests.sh
index e4e594f8d..708953649 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -69,6 +69,7 @@ restore_env=0
 runserver=0
 only_selenium=0
 with_selenium=0
+testopts=""
 testargs=""
 with_coverage=0
 makemessages=0
@@ -99,17 +100,18 @@ function process_option {
     --backup-environment) backup_env=1;;
     --restore-environment) restore_env=1;;
     --destroy-environment) destroy=1;;
+    -*) testopts="$testopts $1";;
     *) testargs="$testargs $1"
   esac
 }
 
 function run_management_command {
-  ${command_wrapper} python $root/manage.py $testargs
+  ${command_wrapper} python $root/manage.py $testopts $testargs
 }
 
 function run_server {
   echo "Starting Django development server..."
-  ${command_wrapper} python $root/manage.py runserver $testargs
+  ${command_wrapper} python $root/manage.py runserver $testopts $testargs
   echo "Server stopped."
 }
 
@@ -272,13 +274,26 @@ function run_tests {
     export SKIP_UNITTESTS=1
   fi
 
+  if [ -z "$testargs" ]; then
+    run_tests_all
+  else
+    run_tests_subset
+  fi
+}
+
+function run_tests_subset {
+  project=`echo $testargs | awk -F. '{print $1}'`
+  ${command_wrapper} python $root/manage.py test --settings=$project.test.settings $testopts $testargs
+}
+
+function run_tests_all {
   echo "Running Horizon application tests"
   export NOSE_XUNIT_FILE=horizon/nosetests.xml
   if [ "$NOSE_WITH_HTML_OUTPUT" = '1' ]; then
     export NOSE_HTML_OUT_FILE='horizon_nose_results.html'
   fi
   ${command_wrapper} coverage erase
-  ${command_wrapper} coverage run -p $root/manage.py test horizon --settings=horizon.test.settings $testargs
+  ${command_wrapper} coverage run -p $root/manage.py test horizon --settings=horizon.test.settings $testopts
 
   # get results of the Horizon tests
   HORIZON_RESULT=$?
@@ -287,7 +302,7 @@ function run_tests {
   if [ "$NOSE_WITH_HTML_OUTPUT" = '1' ]; then
     export NOSE_HTML_OUT_FILE='dashboard_nose_results.html'
   fi
-  ${command_wrapper} coverage run -p $root/manage.py test openstack_dashboard --settings=openstack_dashboard.test.settings $testargs
+  ${command_wrapper} coverage run -p $root/manage.py test openstack_dashboard --settings=openstack_dashboard.test.settings $testopts
 
   # get results of the openstack_dashboard tests
   DASHBOARD_RESULT=$?
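
Note (illustration only, not part of the patch): a minimal sketch of how the
new run_tests_subset function maps a dotted test path to a settings module,
using the same awk expression added above; the sample path is taken from the
documentation examples in this change::

    # Sketch: mirror the settings-module selection done by run_tests_subset.
    testargs="horizon.test.tests.tables"
    # The first dotted component names the project, here "horizon".
    project=`echo $testargs | awk -F. '{print $1}'`
    # run_tests.sh then invokes the test runner with that project's settings:
    echo python manage.py test --settings=$project.test.settings $testargs
    # prints: python manage.py test --settings=horizon.test.settings horizon.test.tests.tables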