Use tox to run unit tests

Previously we used:
source /path/to/somewhere/.venv/bin/activate
cd $WORKSPACE/code/daisy/daisy/tests
python -m unittest discover

With this patch, we can now use "tox -epy27" to run our unit tests.

Change-Id: I71c30cf319ad15d778165bd6f787a1d48df838ac
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
This commit is contained in:
Zhijiang Hu 2016-11-26 04:18:44 -05:00
parent fefb4889c8
commit bd6ae775f2
15 changed files with 37 additions and 1104 deletions

3
code/daisy/.testr.conf Normal file
View File

@ -0,0 +1,3 @@
[DEFAULT]
test_command=${PYTHON:-python} -m unittest discover $LISTOPT
test_list_option=

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import glance_store as store
from oslo_log import log as logging from oslo_log import log as logging
import webob.exc import webob.exc

View File

@ -27,14 +27,12 @@ import sys
import eventlet import eventlet
from daisy.common import utils from daisy.common import utils
import glance_store
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import osprofiler.notifier import osprofiler.notifier
import osprofiler.web import osprofiler.web
from daisy.common import config from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi from daisy.common import wsgi
from daisy import notifier from daisy import notifier
from oslo_service import systemd from oslo_service import systemd
@ -56,14 +54,9 @@ CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi") CONF.import_group("profiler", "daisy.common.wsgi")
logging.register_options(CONF) logging.register_options(CONF)
KNOWN_EXCEPTIONS = (RuntimeError,
exception.WorkerCreationFailure,
glance_store.exceptions.BadStoreConfiguration)
def fail(e): def fail(e):
global KNOWN_EXCEPTIONS return_code = 100 # TODO: (huzhj) make this a meaningful value
return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1
sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e)) sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e))
sys.exit(return_code) sys.exit(return_code)
@ -84,13 +77,12 @@ def main():
else: else:
osprofiler.web.disable() osprofiler.web.disable()
server = wsgi.Server(initialize_glance_store=True) server = wsgi.Server()
server.start(config.load_paste_app('daisy-api'), default_port=9292) server.start(config.load_paste_app('daisy-api'), default_port=9292)
systemd.notify_once() systemd.notify_once()
server.wait() server.wait()
except KNOWN_EXCEPTIONS as e: except Exception as e:
fail(e) fail(e)
if __name__ == '__main__': if __name__ == '__main__':
main() main()

View File

@ -33,7 +33,6 @@ from eventlet.green import socket
from eventlet.green import ssl from eventlet.green import ssl
import eventlet.greenio import eventlet.greenio
import eventlet.wsgi import eventlet.wsgi
import glance_store
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
from oslo_concurrency import processutils from oslo_concurrency import processutils
from oslo_config import cfg from oslo_config import cfg
@ -213,13 +212,6 @@ def set_eventlet_hub():
reason=msg) reason=msg)
def initialize_glance_store():
"""Initialize glance store."""
glance_store.register_opts(CONF)
glance_store.create_stores(CONF)
glance_store.verify_default_store()
def get_asynchronous_eventlet_pool(size=1000): def get_asynchronous_eventlet_pool(size=1000):
"""Return eventlet pool to caller. """Return eventlet pool to caller.
@ -240,12 +232,9 @@ def get_asynchronous_eventlet_pool(size=1000):
class Server(object): class Server(object):
"""Server class to manage multiple WSGI sockets and applications. """Server class to manage multiple WSGI sockets and applications.
This class requires initialize_glance_store set to True if
glance store needs to be initialized.
""" """
def __init__(self, threads=1000, initialize_glance_store=False): def __init__(self, threads=1000):
os.umask(0o27) # ensure files are created with the correct privileges os.umask(0o27) # ensure files are created with the correct privileges
self._logger = logging.getLogger("eventlet.wsgi.server") self._logger = logging.getLogger("eventlet.wsgi.server")
self._wsgi_logger = loggers.WritableLogger(self._logger) self._wsgi_logger = loggers.WritableLogger(self._logger)
@ -253,9 +242,6 @@ class Server(object):
self.children = set() self.children = set()
self.stale_children = set() self.stale_children = set()
self.running = True self.running = True
# NOTE(abhishek): Allows us to only re-initialize glance_store when
# the API's configuration reloads.
self.initialize_glance_store = initialize_glance_store
self.pgid = os.getpid() self.pgid = os.getpid()
try: try:
# NOTE(flaper87): Make sure this process # NOTE(flaper87): Make sure this process
@ -374,8 +360,6 @@ class Server(object):
""" """
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.configure_socket(old_conf, has_changed) self.configure_socket(old_conf, has_changed)
if self.initialize_glance_store:
initialize_glance_store()
def reload(self): def reload(self):
""" """

View File

@ -1,6 +1,6 @@
import mock import mock
import webob import webob
from oslo.serialization import jsonutils from oslo_serialization import jsonutils
from daisy.api.v1 import roles from daisy.api.v1 import roles
from daisy import context from daisy import context
from daisy import test from daisy import test

View File

@ -485,309 +485,3 @@ revocation_cache_time = 10
# the engine. The value can be greater than one when the engine mode is # the engine. The value can be greater than one when the engine mode is
# 'parallel' or 'worker-based', otherwise this value will be ignored. # 'parallel' or 'worker-based', otherwise this value will be ignored.
#max_workers = 10 #max_workers = 10
[glance_store]
# List of which store classes and store class locations are
# currently known to glance at startup.
# Deprecated group/name - [DEFAULT]/known_stores
# Existing but disabled stores:
# glance.store.rbd.Store,
# glance.store.s3.Store,
# glance.store.swift.Store,
# glance.store.sheepdog.Store,
# glance.store.cinder.Store,
# glance.store.gridfs.Store,
# glance.store.vmware_datastore.Store,
#stores = glance.store.filesystem.Store,
# glance.store.http.Store
# Which backend scheme should Glance use by default if one is not
# specified in a request to add a new image to Glance? Known schemes
# are determined by the stores option.
# Deprecated group/name - [DEFAULT]/default_store
# Default: 'file'
default_store = file
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = /var/lib/daisy/images/
# A list of directories where image data can be stored.
# This option may be specified multiple times for specifying multiple store
# directories. Either one of filesystem_store_datadirs or
# filesystem_store_datadir option is required. A priority number may be given
# after each directory entry, separated by a ":".
# When adding an image, the highest priority directory will be selected, unless
# there is not enough space available in cases where the image size is already
# known. If no priority is given, it is assumed to be zero and the directory
# will be considered for selection last. If multiple directories have the same
# priority, then the one with the most free space available is selected.
# If same store is specified multiple times then BadStoreConfiguration
# exception will be raised.
#filesystem_store_datadirs = /var/lib/glance/images/:1
# A path to a JSON file that contains metadata describing the storage
# system. When show_multiple_locations is True the information in this
# file will be returned with any location that is contained in this
# store.
#filesystem_store_metadata_file = None
# ============ Swift Store Options =============================
# Version of the authentication service to use
# Valid versions are '2' for keystone and '1' for swauth and rackspace
swift_store_auth_version = 2
# Address where the Swift authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'https://'
# For swauth, use something like '127.0.0.1:8080/v1.0/'
swift_store_auth_address = 127.0.0.1:5000/v2.0/
# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to 'account':'user'
# where 'account' is a Swift storage account and 'user'
# is a user in that account
swift_store_user = jdoe:jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# swift_store_config_file = glance-swift.conf
# This file contains references for each of the configured
# Swift accounts/backing stores. If used, this option can prevent
# credentials being stored in the database. Using Swift references
# is disabled if this config is left blank.
# The reference to the default Swift parameters to use for adding new images.
# default_swift_reference = 'ref1'
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# If set, the configured endpoint will be used. If None, the storage URL
# from the auth response will be used. The location of an object is
# obtained by appending the container and object to the configured URL.
#
# swift_store_endpoint = https://www.example.com/v1/not_a_container
#swift_store_endpoint =
# If set to True enables multi-tenant storage mode which causes Glance images
# to be stored in tenant specific Swift accounts.
#swift_store_multi_tenant = False
# If set to an integer value between 1 and 32, a single-tenant store will
# use multiple containers to store images. If set to the default value of 0,
# only a single container will be used. Multi-tenant stores are not affected
# by this option. The max number of containers that will be used to store
# images is approximately 16^N where N is the value of this option. Discuss
# the impact of this with your swift deployment team, as this option is only
# beneficial in the largest of deployments where swift rate limiting can lead
# to unwanted throttling on a single container.
#swift_store_multiple_containers_seed = 0
# A list of swift ACL strings that will be applied as both read and
# write ACLs to the containers created by Glance in multi-tenant
# mode. This grants the specified tenants/users read and write access
# to all newly created image objects. The standard swift ACL string
# formats are allowed, including:
# <tenant_id>:<username>
# <tenant_name>:<username>
# *:<username>
# Multiple ACLs can be combined using a comma separated list, for
# example: swift_store_admin_tenants = service:glance,*:admin
#swift_store_admin_tenants =
# The region of the swift endpoint to be used for single tenant. This setting
# is only necessary if the tenant has multiple swift endpoints.
#swift_store_region =
# If set to False, disables SSL layer compression of https swift requests.
# Setting to 'False' may improve performance for images which are already
# in a compressed format, eg qcow2. If set to True, enables SSL layer
# compression (provided it is supported by the target swift proxy).
#swift_store_ssl_compression = True
# The number of times a Swift download will be retried before the
# request fails
#swift_store_retry_get_count = 0
# Bypass SSL verification for Swift
#swift_store_auth_insecure = False
# The path to a CA certificate bundle file to use for SSL verification when
# communicating with Swift.
#swift_store_cacert =
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'http://'
s3_store_host = s3.amazonaws.com
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is append your AWS access key to "glance".
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform's temporary directory
# will be used. If required, an alternative directory can be specified here.
#s3_store_object_buffer_dir = /path/to/dir
# When forming a bucket url, boto will either set the bucket name as the
# subdomain or as the first token of the path. Amazon's S3 service will
# accept it as the subdomain, but Swift's S3 middleware requires it be
# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
#s3_store_bucket_url_format = subdomain
# Size, in MB, should S3 start chunking image files
# and do a multipart upload in S3. The default is 100MB.
#s3_store_large_object_size = 100
# Multipart upload part size, in MB, should S3 use when uploading
# parts. The size must be greater than or equal to
# 5MB. The default is 10MB.
#s3_store_large_object_chunk_size = 10
# The number of thread pools to perform a multipart upload
# in S3. The default is 10.
#s3_store_thread_pools = 10
# ============ RBD Store Options =============================
# Ceph configuration file path
# If using cephx authentication, this file should
# include a reference to the right keyring
# in a client.<USER> section
#rbd_store_ceph_conf = /etc/ceph/ceph.conf
# RADOS user to authenticate as (only applicable if using cephx)
# If <None>, a default will be chosen based on the client. section
# in rbd_store_ceph_conf
#rbd_store_user = <None>
# RADOS pool in which images are stored
#rbd_store_pool = images
# RADOS images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
#rbd_store_chunk_size = 8
# ============ Sheepdog Store Options =============================
sheepdog_store_address = localhost
sheepdog_store_port = 7000
# Images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
sheepdog_store_chunk_size = 64
# ============ Cinder Store Options ===============================
# Info to match when looking for cinder in the service catalog
# Format is : separated values of the form:
# <service_type>:<service_name>:<endpoint_type> (string value)
#cinder_catalog_info = volume:cinder:publicURL
# Override service catalog lookup with template for cinder endpoint
# e.g. http://localhost:8776/v1/%(project_id)s (string value)
#cinder_endpoint_template = <None>
# Region name of this node (string value)
#os_region_name = <None>
# Location of ca certificates file to use for cinder client requests
# (string value)
#cinder_ca_certificates_file = <None>
# Number of cinderclient retries on failed http calls (integer value)
#cinder_http_retries = 3
# Allow to perform insecure SSL requests to cinder (boolean value)
#cinder_api_insecure = False
# ============ VMware Datastore Store Options =====================
# ESX/ESXi or vCenter Server target system.
# The server value can be an IP address or a DNS name
# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
#vmware_server_host = <None>
# Server username (string value)
#vmware_server_username = <None>
# Server password (string value)
#vmware_server_password = <None>
# Inventory path to a datacenter (string value)
# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
# should be `ha-datacenter`.
# Deprecated in favor of vmware_datastores.
#vmware_datacenter_path = <None>
# Datastore associated with the datacenter (string value)
# Deprecated in favor of vmware_datastores.
#vmware_datastore_name = <None>
# A list of datastores where the image can be stored.
# This option may be specified multiple times for specifying multiple
# datastores. Either one of vmware_datastore_name or vmware_datastores is
# required. The datastore name should be specified after its datacenter
# path, separated by ":". An optional weight may be given after the datastore
# name, separated again by ":". Thus, the required format becomes
# <datacenter_path>:<datastore_name>:<optional_weight>.
# When adding an image, the datastore with highest weight will be selected,
# unless there is not enough free space available in cases where the image size
# is already known. If no weight is given, it is assumed to be zero and the
# directory will be considered for selection last. If multiple datastores have
# the same weight, then the one with the most free space available is selected.
#vmware_datastores = <None>
# The number of times we retry on failures
# e.g., socket error, etc (integer value)
#vmware_api_retry_count = 10
# The interval used for polling remote tasks
# invoked on VMware ESX/VC server in seconds (integer value)
#vmware_task_poll_interval = 5
# Absolute path of the folder containing the images in the datastore
# (string value)
#vmware_store_image_dir = /openstack_glance
# Allow to perform insecure SSL requests to the target system (boolean value)
#vmware_api_insecure = False

View File

@ -5,46 +5,41 @@
pbr>=1.6 # Apache-2.0 pbr>=1.6 # Apache-2.0
# < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983 # < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983
SQLAlchemy>=0.9.7,<=0.9.99 SQLAlchemy<1.1.0,>=1.0.10 # MIT
anyjson>=0.3.3 anyjson>=0.3.3
eventlet>=0.16.1,!=0.17.0 eventlet>=0.16.1,!=0.17.0
PasteDeploy>=1.5.0 PasteDeploy>=1.5.0
Routes>=1.12.3,!=2.0 Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7' # MIT
Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT
WebOb>=1.2.3 WebOb>=1.2.3
sqlalchemy-migrate>=0.9.5 sqlalchemy-migrate>=0.9.5
httplib2>=0.7.5 httplib2>=0.7.5
kombu>=2.5.0 pycrypto>=2.6 # Public Domain
pycrypto>=2.6 oslo.config>=3.7.0 # Apache-2.0
iso8601>=0.1.9 oslo.concurrency>=3.7.1 # Apache-2.0
ordereddict oslo.context>=0.2.0 # Apache-2.0
oslo_config>=1.9.3,<1.10.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0
oslo_concurrency>=1.8.0,<1.9.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0
oslo_context>=0.2.0,<0.3.0 # Apache-2.0 stevedore>=1.5.0 # Apache-2.0
oslo_utils>=1.4.0,<1.5.0 # Apache-2.0 taskflow>=1.26.0 # Apache-2.0
stevedore>=1.3.0,<1.4.0 # Apache-2.0 keystoneauth1>=2.1.0 # Apache-2.0
taskflow>=0.7.1,<0.8.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0
keystonemiddleware>=1.5.0,<1.6.0 WSME>=0.8 # MIT
WSME>=0.6
# For paste.util.template used in keystone.common.template # For paste.util.template used in keystone.common.template
Paste Paste
python-keystoneclient!=1.8.0,!=2.1.0,<3.0.0,>=1.6.0 # Apache-2.0
jsonschema>=2.0.0,<3.0.0
python-keystoneclient>=1.1.0,<1.4.0
pyOpenSSL>=0.11 pyOpenSSL>=0.11
# Required by openstack.common libraries # Required by openstack.common libraries
six>=1.9.0 six>=1.9.0
oslo_db>=1.7.0,<1.8.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0
oslo_i18n>=1.5.0,<1.6.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0
oslo_log>=1.0.0,<1.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0
oslo_messaging>=1.8.0,<1.9.0 # Apache-2.0 oslo.messaging>=4.0.0 # Apache-2.0
oslo_policy>=0.3.1,<0.4.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0
oslo_serialization>=1.4.0,<1.5.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0
oslo.service>=0.1.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0
retrying>=1.2.3,!=1.3.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0
osprofiler>=0.3.0 # Apache-2.0 osprofiler>=1.1.0 # Apache-2.0
# Artifact repository
semantic_version>=2.3.1

View File

@ -1,251 +0,0 @@
#!/bin/bash
set -eu
# Print the usage/help text for this test-runner script, then exit the
# whole script (not just the function) via the bare `exit` at the end.
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Glance's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
echo " -h, --help Print this usage message"
echo " --virtual-env-path <path> Location of the virtualenv directory"
echo " Default: \$(pwd)"
echo " --virtual-env-name <name> Name of the virtualenv directory"
echo " Default: .venv"
echo " --tools-path <dir> Location of the tools directory"
echo " Default: \$(pwd)"
echo " --concurrency <concurrency> How many processes to use when running the tests. A value of 0 autodetects concurrency from your CPU count"
echo " Default: 0"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
# Parse the script's command-line arguments, setting the global option
# flags defined below. Uses ${!i} indirect expansion to walk "$@" by
# index so that value-taking options can consume the following word.
# Unrecognized words starting with '-' are accumulated into $testropts
# and bare words into $testrargs; both are forwarded to testr later.
function process_options {
i=1
while [ $i -le $# ]; do
case "${!i}" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-s|--no-site-packages) no_site_packages=1;;
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_pep8=1;;
-8|--pep8-only-changed) just_pep8_changed=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-d|--debug) debug=1;;
--virtual-env-path)
(( i++ ))
venv_path=${!i}
;;
--virtual-env-name)
(( i++ ))
venv_dir=${!i}
;;
--tools-path)
(( i++ ))
tools_path=${!i}
;;
--concurrency)
(( i++ ))
concurrency=${!i}
;;
-*) testropts="$testropts ${!i}";;
*) testrargs="$testrargs ${!i}"
esac
(( i++ ))
done
}
# ---- Defaults for all options (may be overridden by process_options) ----
# NOTE(review): tool_path is assigned here but never referenced in the
# rest of this script — looks vestigial; confirm before relying on it.
tool_path=${tools_path:-$(pwd)}
venv_path=${venv_path:-$(pwd)}
# NOTE(review): venv_name is never assigned in this script, so this
# default (.venv) normally wins; --virtual-env-name sets venv_dir directly.
venv_dir=${venv_name:-.venv}
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
no_site_packages=0
installvenvopts=
testrargs=
testropts=
wrapper=""
just_pep8=0
just_pep8_changed=0
no_pep8=0
coverage=0
debug=0
update=0
concurrency=0
# Pin the locale so tool output is stable/parseable.
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
process_options $@
# Make our paths available to other scripts we call
export venv_path
export venv_dir
export venv_name
# NOTE(review): tools_dir is exported but never set anywhere in this
# script — presumably consumed (or ignored) by tools/with_venv.sh; verify.
export tools_dir
export venv=${venv_path}/${venv_dir}
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
# Run the unit test suite.
# - In debug mode (-d) run via `python -m testtools.run` so pdb works,
#   then return immediately (testr/coverage handling is skipped).
# - Otherwise build a `testr` command line (optionally with --coverage),
#   pipe its subunit stream through the colorizer, save the raw stream
#   to subunit.log, and return testr's exit status.
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
if [ $debug -eq 1 ]; then
if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
# Default to running all tests if specific test is not
# provided.
testrargs="discover ./glance/tests"
fi
${wrapper} python -m testtools.run $testropts $testrargs
# Short circuit because all of the testr and coverage stuff
# below does not make sense when running testtools.run for
# debugging purposes.
return $?
fi
if [ $coverage -eq 1 ]; then
TESTRTESTS="$TESTRTESTS --coverage"
else
TESTRTESTS="$TESTRTESTS"
fi
# Just run the test suites in current environment
# Disable errexit so a failing test run doesn't abort the script
# before we can capture $? and copy the subunit log.
set +e
# Trim leading/trailing whitespace from the accumulated test args.
testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
TESTRTESTS="$TESTRTESTS --testr-args='--subunit --concurrency $concurrency $testropts $testrargs'"
# Regenerate egg metadata when setup.cfg is newer than the entry points.
if [ setup.cfg -nt glance.egg-info/entry_points.txt ]
then
${wrapper} python setup.py egg_info
fi
echo "Running \`${wrapper} $TESTRTESTS\`"
if ${wrapper} which subunit-2to1 2>&1 > /dev/null
then
# subunit-2to1 is present, testr subunit stream should be in version 2
# format. Convert to version one before colorizing.
bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py"
else
bash -c "${wrapper} $TESTRTESTS | ${wrapper} tools/colorizer.py"
fi
RESULT=$?
set -e
copy_subunit_log
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
# Don't compute coverage for common code, which is tested elsewhere
${wrapper} coverage combine
${wrapper} coverage html --include='glance/*' --omit='glance/openstack/common/*' -d covhtml -i
fi
return $RESULT
}
# Copy the most recent testr subunit stream to ./subunit.log.
# testr numbers streams under .testrepository/; the file `next-stream`
# holds the index that the *next* run will use, so the latest completed
# stream is that value minus one.
function copy_subunit_log {
local next_stream latest
next_stream=$(cat .testrepository/next-stream)
latest=$((next_stream - 1))
cp ".testrepository/${latest}" subunit.log
}
# Warn when flake8 is about to run outside a virtualenv: the OpenStack
# HACKING plugin may not be installed system-wide, silently weakening
# the style check. Only fires when -N/--no-virtual-env was given.
function warn_on_flake8_without_venv {
[ "$never_venv" -eq 1 ] || return 0
echo "**WARNING**:"
echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
}
# Run the flake8/HACKING style checks, warning first if no virtualenv
# wrapper is in effect. $wrapper is tools/with_venv.sh or empty.
function run_pep8 {
echo "Running flake8 ..."
warn_on_flake8_without_venv
bash -c "${wrapper} flake8"
}
# ---- Main driver ----
# Base testr invocation; run_tests appends --coverage/--testr-args to it.
TESTRTESTS="lockutils-wrapper python setup.py testr"
# Unless -N was given, make sure a virtualenv exists (building, updating
# or interactively offering to create one as requested) and set $wrapper
# so all subsequent commands run inside it.
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py $installvenvopts
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
# Short-circuit modes: style checks only, then exit.
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
if [ $just_pep8_changed -eq 1 ]; then
# NOTE(gilliard) We want use flake8 to check the entirety of every file that has
# a change in it. Unfortunately the --filenames argument to flake8 only accepts
# file *names* and there are no files named (eg) "nova/compute/manager.py". The
# --diff argument behaves surprisingly as well, because although you feed it a
# diff, it actually checks the file on disk anyway.
files=$(git diff --name-only HEAD~1 | tr '\n' ' ')
echo "Running flake8 on ${files}"
warn_on_flake8_without_venv
bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff"
exit
fi
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (testropts), which begin with a '-', and
# arguments (testrargs).
if [ -z "$testrargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
fi

View File

@ -2,13 +2,11 @@
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
# Hacking already pins down pep8, pyflakes and flake8
hacking>=0.10.0,<0.11
# For translations processing # For translations processing
Babel>=1.3 Babel>=1.3
# Needed for testing # Needed for testing
bandit>=0.17.3 # Apache-2.0
coverage>=3.6 coverage>=3.6
discover discover
fixtures>=0.3.14 fixtures>=0.3.14
@ -17,23 +15,16 @@ mock>=1.0
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
requests>=2.2.0,!=2.4.0 requests>=2.2.0,!=2.4.0
testrepository>=0.0.18 testrepository>=0.0.18
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=0.9.36,!=1.2.0 testtools>=0.9.36,!=1.2.0
psutil>=1.1.1,<2.0.0 psutil>=1.1.1,<2.0.0
oslotest>=1.5.1,<1.6.0 # Apache-2.0 oslotest>=1.5.1,<1.6.0 # Apache-2.0
# Optional packages that should be installed when testing # Optional packages that should be installed when testing
MySQL-python PyMySQL!=0.7.7,>=0.6.2 # MIT License
psycopg2 psycopg2
pysendfile==2.0.0 pysendfile==2.0.0
qpid-python qpid-python;python_version=='2.7' # Apache-2.0
xattr>=0.4 xattr>=0.4
# Documentation # Documentation
oslosphinx>=2.5.0,<2.6.0 # Apache-2.0 oslosphinx>=2.5.0,<2.6.0 # Apache-2.0
# Glance catalog index
elasticsearch>=1.3.0
python-daisyclient
python-ironicclient
ironic
mox

View File

@ -1,73 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Daisy's development virtualenv
"""
from __future__ import print_function
import os
import sys
import install_venv_common as install_venv # noqa
def print_help():
    """Print post-setup instructions for the Daisy development virtualenv.

    Reconstructed with conventional formatting; the extraction stripped
    the original indentation, so exact blank-line placement inside the
    message is best-effort.
    """
    # Renamed from `help` to avoid shadowing the builtin.
    message = """
Daisy development environment setup is complete.

Daisy development uses virtualenv to track and manage Python dependencies
while in development and testing.

To activate the Daisy virtualenv for the extent of your current shell session
you can run:

$ source .venv/bin/activate

Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:

$ tools/with_venv.sh <your command>

Also, make test will automatically use the virtualenv.
"""
    print(message)
def main(argv):
    """Bootstrap the Daisy development virtualenv.

    Locates the repository root relative to this script, drives the
    install_venv_common helpers to create the .venv and install the
    run-time and test requirements, installs the project itself in
    develop mode, and finally prints usage hints.

    :param argv: command-line arguments forwarded to the option parser.
    """
    repo_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    venv_dir = os.path.join(repo_root, '.venv')
    runtime_reqs = os.path.join(repo_root, 'requirements.txt')
    test_reqs = os.path.join(repo_root, 'test-requirements.txt')
    interpreter = "python%s.%s" % sys.version_info[:2]
    installer = install_venv.InstallVenv(
        repo_root, venv_dir, runtime_reqs, test_reqs, interpreter, 'Daisy')
    opts = installer.parse_args(argv)
    installer.check_python_version()
    installer.check_dependencies()
    installer.create_virtualenv(no_site_packages=opts.no_site_packages)
    installer.install_dependencies()
    installer.run_command(
        [os.path.join(venv_dir, 'bin/python'), 'setup.py', 'develop'])
    print_help()

View File

@ -1,172 +0,0 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
    """Create a project virtualenv and install its dependencies."""

    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        """Write *message* (%%-formatted with *args*) to stderr and exit(1)."""
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        """Abort when the interpreter is older than Python 2.6."""
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns ``(output, returncode)``; the working directory is
        ``self.root``.  Dies when *check_exit_code* is set and the command
        exits non-zero.
        """
        stdout = subprocess.PIPE if redirect_output else None
        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        return (output, proc.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        """Like run_command_with_code() but return only the output."""
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        """Return the distro helper matching the host operating system."""
        ctor_args = (self.root, self.venv, self.requirements,
                     self.test_requirements, self.py_version, self.project)
        fedora_like = (os.path.exists('/etc/fedora-release') or
                       os.path.exists('/etc/redhat-release'))
        return Fedora(*ctor_args) if fedora_like else Distro(*ctor_args)

    def check_dependencies(self):
        """Make sure the virtualenv tool itself is available."""
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if os.path.isdir(self.venv):
            print("venv already exists...")
            return
        print('Creating venv...', end=' ')
        cmd = ['virtualenv', '-q']
        if no_site_packages:
            cmd.append('--no-site-packages')
        cmd.append(self.venv)
        self.run_command(cmd)
        print('done.')

    def pip_install(self, *args):
        """Run ``pip install --upgrade`` with *args* inside the venv."""
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        """Install build tooling, then the runtime and test requirements."""
        print('Installing dependencies with pip (this can take a while)...')

        # First things first, make sure our venv has the latest pip and
        # setuptools and pbr
        self.pip_install('pip>=1.4')
        self.pip_install('setuptools')
        self.pip_install('pbr')

        self.pip_install('-r', self.requirements, '-r', self.test_requirements)

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install.")
        return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
    """Generic distribution: locate or bootstrap the virtualenv tool."""

    def check_cmd(self, cmd):
        """Return True when *cmd* resolves to an executable on PATH."""
        which_output = self.run_command(['which', cmd],
                                        check_exit_code=False)
        return bool(which_output.strip())

    def install_virtualenv(self):
        """Ensure ``virtualenv`` is installed, trying easy_install first."""
        if self.check_cmd('virtualenv'):
            return

        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            print('Failed')

        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        """Return True when the RPM package *pkg* is installed."""
        returncode = self.run_command_with_code(['rpm', '-q', pkg],
                                                check_exit_code=False)[1]
        return returncode == 0

    def install_virtualenv(self):
        """Require the distro python-virtualenv package, then delegate."""
        if self.check_cmd('virtualenv'):
            return

        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")

        super(Fedora, self).install_virtualenv()

View File

@ -1,119 +0,0 @@
#!/usr/bin/env python
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import keystoneclient.v2_0.client
from oslo_config import cfg
from oslo_log import log as logging
import glance.context
import glance.db.sqlalchemy.api as db_api
from glance import i18n
import glance.registry.context
# Stdlib logging is needed alongside oslo_log to attach a handler and set
# the level; ``logging`` in this module is ``oslo_log.log`` (see imports).
import logging as std_logging

_ = i18n._
_LC = i18n._LC
_LE = i18n._LE
_LI = i18n._LI

LOG = logging.getLogger(__name__)
# NOTE: oslo_log's ``log`` module exposes neither StreamHandler nor DEBUG,
# and its getLogger() returns an adapter without addHandler(); configure the
# underlying stdlib logger of the same name instead.
std_logging.getLogger(__name__).addHandler(std_logging.StreamHandler())
std_logging.getLogger(__name__).setLevel(std_logging.DEBUG)
def get_owner_map(ksclient, owner_is_tenant=True):
    """Build a mapping of keystone entity names to their ids.

    :param ksclient: authenticated keystone v2 client
    :param owner_is_tenant: when True owners are tenants, otherwise users
    :returns: dict mapping (tenant or user) name -> id
    """
    manager = ksclient.tenants if owner_is_tenant else ksclient.users
    return dict((entity.name, entity.id) for entity in manager.list())
def build_image_owner_map(owner_map, db, context):
    """Resolve each image's owner name to its keystone id.

    :param owner_map: dict of owner name -> keystone id (see get_owner_map)
    :param db: database API module exposing image_get_all()
    :param context: request context used for the db query
    :returns: dict of image id -> resolved owner id; images with no owner or
        an owner missing from *owner_map* are skipped
    """
    image_owner_map = {}
    for image in db.image_get_all(context):
        image_id = image['id']
        owner_name = image['owner']

        if not owner_name:
            LOG.info(_LI('Image %s has no owner. Skipping.'), image_id)
            continue

        try:
            owner_id = owner_map[owner_name]
        except KeyError:
            # NOTE: the original code built a (message, args) tuple and
            # logged the tuple itself, so the placeholders were never
            # interpolated; pass the args to the logger instead.
            LOG.error(_LE('Image "%(image)s" owner "%(owner)s" was not '
                          'found. Skipping.'),
                      {'image': image_id, 'owner': owner_name})
            continue

        image_owner_map[image_id] = owner_id

        LOG.info(_LI('Image "%(image)s" owner "%(owner)s" -> "%(owner_id)s"'),
                 {'image': image_id, 'owner': owner_name,
                  'owner_id': owner_id})

    return image_owner_map
def update_image_owners(image_owner_map, db, context):
    """Persist the resolved owner id onto each image row.

    :param image_owner_map: dict of image id -> new owner id
    :param db: database API module exposing image_update()
    :param context: request context used for the updates
    """
    for image_id, new_owner in image_owner_map.items():
        db.image_update(context, image_id, {'owner': new_owner})
        LOG.info(_LI('Image %s successfully updated.') % image_id)
if __name__ == "__main__":
    config = cfg.CONF
    extra_cli_opts = [
        cfg.BoolOpt('dry-run',
                    help='Print output but do not make db changes.'),
        cfg.StrOpt('keystone-auth-uri',
                   help='Authentication endpoint'),
        cfg.StrOpt('keystone-admin-tenant-name',
                   help='Administrative user\'s tenant name'),
        cfg.StrOpt('keystone-admin-user',
                   help='Administrative user\'s id'),
        cfg.StrOpt('keystone-admin-password',
                   help='Administrative user\'s password',
                   secret=True),
    ]
    config.register_cli_opts(extra_cli_opts)

    config(project='glance', prog='glance-registry')

    db_api.configure_db()
    # NOTE: this module imports ``glance.context`` (not glance.common);
    # the original referenced glance.common.context.RequestContext, an
    # attribute path this file never imports.
    context = glance.context.RequestContext(is_admin=True)

    auth_uri = config.keystone_auth_uri
    admin_tenant_name = config.keystone_admin_tenant_name
    admin_user = config.keystone_admin_user
    admin_password = config.keystone_admin_password

    # All four keystone credentials are mandatory for the migration.
    if not (auth_uri and admin_tenant_name and admin_user and admin_password):
        LOG.critical(_LC('Missing authentication arguments'))
        sys.exit(1)

    ks = keystoneclient.v2_0.client.Client(username=admin_user,
                                           password=admin_password,
                                           tenant_name=admin_tenant_name,
                                           auth_url=auth_uri)

    owner_map = get_owner_map(ks, config.owner_is_tenant)
    image_updates = build_image_owner_map(owner_map, db_api, context)
    if not config.dry_run:
        update_image_owners(image_updates, db_api, context)

View File

@ -1,7 +0,0 @@
#!/bin/bash
# Run an arbitrary command inside the project's virtualenv.
# Usage: tools/with_venv.sh <command> [args...]
# Every variable below may be overridden from the environment.
TOOLS_PATH=${TOOLS_PATH:-$(dirname $0)}  # directory holding this script
VENV_PATH=${VENV_PATH:-${TOOLS_PATH}}  # base directory the venv lives under
VENV_DIR=${VENV_NAME:-/../.venv}  # venv location relative to VENV_PATH
TOOLS=${TOOLS_PATH}
VENV=${VENV:-${VENV_PATH}/${VENV_DIR}}  # full path to the virtualenv
source ${VENV}/bin/activate && "$@"

View File

@ -9,22 +9,15 @@ usedevelop = True
install_command = pip install -U {opts} {packages} install_command = pip install -U {opts} {packages}
deps = -r{toxinidir}/requirements.txt deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
commands = lockutils-wrapper python setup.py testr --slowest --testr-args='{posargs}' commands = python setup.py testr --slowest --testr-args='{posargs}'
whitelist_externals = bash whitelist_externals = bash
[tox:jenkins]
downloadcache = ~/cache/pip
[testenv:pep8] [testenv:pep8]
commands = commands =
flake8 {posargs} flake8 {posargs}
# Check that .po and .pot files are valid: # Check that .po and .pot files are valid:
bash -c "find daisy -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" bash -c "find daisy -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null"
[testenv:cover]
setenv = VIRTUAL_ENV={envdir}
commands = python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$'
[testenv:venv] [testenv:venv]
commands = {posargs} commands = {posargs}
@ -48,6 +41,3 @@ commands = python setup.py build_sphinx
# H904 Wrap long lines in parentheses instead of a backslash # H904 Wrap long lines in parentheses instead of a backslash
ignore = E711,E712,H302,H402,H404,H405,H904,F841,F821,E265,F812,F402,E226,E731 ignore = E711,E712,H302,H402,H404,H405,H904,F841,F821,E265,F812,F402,E226,E731
exclude = .venv,.git,.tox,dist,doc,etc,*daisy/locale*,*openstack/common*,*lib/python*,*egg,build,daisy/db/sqlalchemy/api.py,daisy/i18n.py exclude = .venv,.git,.tox,dist,doc,etc,*daisy/locale*,*openstack/common*,*lib/python*,*egg,build,daisy/db/sqlalchemy/api.py,daisy/i18n.py
[hacking]
local-check-factory = daisy.hacking.checks.factory

View File

@ -1,93 +0,0 @@
#!/bin/bash
#********
# Set up a unit-test (virtualenv) development environment for a module.
#
# Usage:
#   1. Copy this script into the module you want to test,
#      e.g.: cp unittest_install.sh ../../openstack/keystone/
#   2. Run it from that directory; the unittest environment is then built.
#
# NOTE: this script only supports CGSLV5 (3.10-based kernel).
#*****
# Abort early unless the running kernel matches the supported CGSLV5 release.
Install_version=`uname -a`
Right_version="3.10"
result=$(echo $Install_version | grep "${Right_version}")
if [[ "$result" == "" ]]
then
echo "only support CGSLV5,please change your version first..."
exit 1
fi
# Internal mirror host serving both the yum repository and the pypi index.
pip_ip=10.43.177.17
log_path=logs
mkdir -p $log_path
# Recreate the yum repo definition pointing at the internal mirror.
rm -rf /etc/yum.repos.d/opencos.repo
opencos_repo=/etc/yum.repos.d/opencos.repo
echo "Create $opencos_repo ..."
echo "[opencos]">>$opencos_repo
echo "name=opencos">>$opencos_repo
echo "baseurl=http://$pip_ip/pypi/">>$opencos_repo
echo "enabled=1">>$opencos_repo
echo "gpgcheck=0">>$opencos_repo
# Recreate the pip configuration so pip resolves only against the mirror.
rm -rf ~/.pip/pip.conf
pip_config=~/.pip/pip.conf
echo "Create $pip_config ..."
if [ ! -d `dirname $pip_config` ]; then
mkdir -p `dirname $pip_config`
fi
echo "[global]">$pip_config
echo "find-links = http://$pip_ip/pypi">>$pip_config
echo "no-index = true">>$pip_config
echo "[install]">>$pip_config
echo "trusted-host = $pip_ip">>$pip_config
# Point easy_install at the same internal index.
rm -rf ~/.pydistutils.cfg
pydistutils_cfg=~/.pydistutils.cfg
echo "Create $pydistutils_cfg ..."
echo "[easy_install]">$pydistutils_cfg
echo "index_url = http://$pip_ip/pypi">>$pydistutils_cfg
# Build/runtime packages required before the virtualenv can be created.
modules=(virtualenv mariadb-devel postgresql-devel libffi-devel m2crypto openssl-devel
cyrus-sasl-devel sqlite-devel libxslt-devel openldap-devel mongodb-server)
yum clean all 1>/dev/null 2>/dev/null
# for virtual environment demand pip version>=1.6, so install it whether installed.
yum --disablerepo=* --enablerepo=opencos install -y pip 1>$log_path/pip.log 2>$log_path/pip.err
yum --disablerepo=* --enablerepo=opencos install -y swig 1>$log_path/swig.log 2>$log_path/swig.err
yum --disablerepo=* --enablerepo=opencos install -y openstack-ceilometer-api 1>$log_path/ceilometer-api.log \
2>$log_path/ceilometer-api.err
# install modules
for mod in ${modules[@]}; do
echo -n "yum install $mod ... "
already_install=`rpm -qa | grep $mod`
if [ "$already_install" == "" ]; then
yum --disablerepo=* --enablerepo=opencos install -y $mod 1>$log_path/$mod.log 2>$log_path/$mod.err
# A non-empty .err log is treated as an installation failure.
if [ -s $log_path/$mod.err ]; then
echo "fail"
echo "Please contact li.guomin3@zte.com.cn,wu.wei266@zte.com.cn,liang.jingtao@zte.com.cn "
exit 1
else
echo "ok(install finish)"
fi
else
echo "ok(already exist)"
fi
done
#modify for heat M2Crypto install error: replace the architecture #error stub
# with the real x86_64 header so M2Crypto can compile.
file_name=/usr/include/openssl/opensslconf.h
action=`sed -i 's/#error "This openssl-devel package does not work your architecture?"/#include "opensslconf-x86_64.h"/g' $file_name`
# Finally create the virtualenv and install the project requirements.
echo "install venv ... "
chmod +x tools/*
python tools/install_venv.py 1>$log_path/install_venv.log 2>$log_path/install_venv.err
if grep "development environment setup is complete." $log_path/install_venv.log
then
echo "development environment setup is complete..."
else
echo "development environment setup is fail,please check logs/install_venv.err"
cat $log_path/install_venv.err
fi