From 07115eb5c06a7f5a63bfacdf33ded466c8f2e2c9 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Tue, 26 Feb 2013 12:38:18 -0800 Subject: [PATCH] Refactor error logging It is hard to grep errors in current log. so in this patch, I'm updating die function which also writes log for screen_log_dir/error.log. In future, we may categolize negative fault by using this error.log. Change-Id: I70a8cfe67ed408284f5c88c762c6bb8acb8ecdb2 --- exercises/aggregates.sh | 12 +++---- exercises/boot_from_volume.sh | 16 ++++----- exercises/bundle.sh | 11 +++--- exercises/euca.sh | 59 ++++++++++++++------------------- exercises/floating_ips.sh | 39 ++++++++++------------ exercises/horizon.sh | 2 +- exercises/quantum-adv-test.sh | 5 ++- exercises/sec_groups.sh | 2 +- exercises/swift.sh | 10 +++--- exercises/volumes.sh | 49 +++++++++++---------------- functions | 27 ++++++++------- lib/glance | 3 +- lib/keystone | 3 +- lib/nova | 3 +- lib/quantum | 17 ++++------ lib/quantum_plugins/nicira | 15 +++------ lib/quantum_plugins/openvswitch | 4 +-- lib/rpc_backend | 3 +- stack.sh | 16 +++------ 19 files changed, 125 insertions(+), 171 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index a92c0d9276..3c83725491 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -56,7 +56,7 @@ exit_if_aggregate_present() { if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then echo "SUCCESS $aggregate_name not present" else - echo "ERROR found aggregate: $aggregate_name" + die $LINENO "found aggregate: $aggregate_name" exit -1 fi } @@ -67,15 +67,14 @@ AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) # check aggregate created -nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created" +nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate 
$AGGREGATE_NAME not created" # Ensure creating a duplicate fails # ================================= if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then - echo "ERROR could create duplicate aggregate" - exit -1 + die $LINENO "could create duplicate aggregate" fi @@ -113,7 +112,7 @@ nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=78 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY -nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared" +nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared" nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" @@ -129,8 +128,7 @@ FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then - echo "ERROR could add duplicate host to single aggregate" - exit -1 + die $LINENO "could add duplicate host to single aggregate" fi nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 679091bb39..14d00492f6 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -72,7 +72,7 @@ glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" +die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- @@ -140,7 +140,7 @@ fi # Create the bootable volume start_time=$(date +%s) cinder create --image-id $IMAGE 
--display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die "Failure creating volume $VOL_NAME" + die $LINENO "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 @@ -150,7 +150,7 @@ echo "Completed cinder create in $((end_time - start_time)) seconds" # Get volume ID VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1) -die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" +die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Boot instance # ------------- @@ -159,7 +159,7 @@ die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # =::: # Leaving the middle two fields blank appears to do-the-right-thing VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set VM_UUID "Failure launching $VM_NAME" +die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then @@ -169,7 +169,7 @@ fi # Get the instance IP IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) -die_if_not_set IP "Failure retrieving IP address" +die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT @@ -178,7 +178,7 @@ ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # -------- # Delete volume backed instance -nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" +nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then echo "Server $VM_NAME not deleted" exit 1 @@ -192,7 +192,7 @@ fi # Delete volume start_time=$(date +%s) -cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME" +cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 @@ -201,7 +201,7 @@ end_time=$(date +%s) echo "Completed cinder delete in $((end_time - start_time)) seconds" # Delete secgroup -nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/bundle.sh b/exercises/bundle.sh index 12f27323b9..dce36aa31f 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -49,21 +49,20 @@ REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15} BUCKET=testbucket IMAGE=bundle.img truncate -s 5M /tmp/$IMAGE -euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE" +euca-bundle-image -i /tmp/$IMAGE || die $LINENO "Failure bundling image $IMAGE" -euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" +euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die $LINENO "Failure uploading bundle $IMAGE to $BUCKET" AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` -die_if_not_set AMI "Failure registering $BUCKET/$IMAGE" +die_if_not_set $LINENO AMI "Failure registering $BUCKET/$IMAGE" # Wait for the image to become available if ! 
timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then - echo "Image $AMI not available within $REGISTER_TIMEOUT seconds" - exit 1 + die $LINENO "Image $AMI not available within $REGISTER_TIMEOUT seconds" fi # Clean up -euca-deregister $AMI || die "Failure deregistering $AMI" +euca-deregister $AMI || die $LINENO "Failure deregistering $AMI" set +o xtrace echo "*********************************************************************" diff --git a/exercises/euca.sh b/exercises/euca.sh index 8b15da8d49..50d4744e69 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -56,68 +56,62 @@ SECGROUP=${SECGROUP:-euca_secgroup} # Find a machine image to boot IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` -die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" +die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Add a secgroup if ! euca-describe-groups | grep -q $SECGROUP; then euca-add-group -d "$SECGROUP description" $SECGROUP if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 + die $LINENO "Security group not created" fi fi # Launch it INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2` -die_if_not_set INSTANCE "Failure launching instance" +die_if_not_set $LINENO INSTANCE "Failure launching instance" # Assure it has booted within a reasonable time if ! timeout $RUNNING_TIMEOUT sh -c "while ! 
euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then - echo "server didn't become active within $RUNNING_TIMEOUT seconds" - exit 1 + die $LINENO "server didn't become active within $RUNNING_TIMEOUT seconds" fi # Volumes # ------- if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` - die_if_not_set VOLUME_ZONE "Failure to find zone for volume" + die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` - die_if_not_set VOLUME "Failure to create volume" + die_if_not_set $LINENO VOLUME "Failure to create volume" # Test that volume has been created VOLUME=`euca-describe-volumes | cut -f2` - die_if_not_set VOLUME "Failure to get volume" + die_if_not_set $LINENO VOLUME "Failure to get volume" # Test volume has become available if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - echo "volume didnt become available within $RUNNING_TIMEOUT seconds" - exit 1 + die $LINENO "volume didnt become available within $RUNNING_TIMEOUT seconds" fi # Attach volume to an instance euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ - die "Failure attaching volume $VOLUME to $INSTANCE" + die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then - echo "Could not attach $VOLUME to $INSTANCE" - exit 1 + die $LINENO "Could not attach $VOLUME to $INSTANCE" fi # Detach volume from an instance euca-detach-volume $VOLUME || \ - die "Failure detaching volume $VOLUME to $INSTANCE" + die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - echo "Could not detach $VOLUME to $INSTANCE" - exit 1 + die $LINENO "Could not detach $VOLUME to $INSTANCE" fi # Remove volume euca-delete-volume $VOLUME || \ - die "Failure to delete volume" + die $LINENO "Failure to delete volume" if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then - echo "Could not delete $VOLUME" - exit 1 + die $LINENO "Could not delete $VOLUME" fi else echo "Volume Tests Skipped" @@ -125,58 +119,55 @@ fi # Allocate floating address FLOATING_IP=`euca-allocate-address | cut -f2` -die_if_not_set FLOATING_IP "Failure allocating floating IP" +die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP" # Associate floating address euca-associate-address -i $INSTANCE $FLOATING_IP || \ - die "Failure associating address $FLOATING_IP to $INSTANCE" + die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE" # Authorize pinging euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ - die "Failure authorizing rule in $SECGROUP" + die $LINENO "Failure authorizing rule in $SECGROUP" # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT # Revoke pinging euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ - die "Failure revoking rule in $SECGROUP" + die $LINENO "Failure revoking rule in $SECGROUP" # Release floating address euca-disassociate-address $FLOATING_IP || \ - die "Failure disassociating address $FLOATING_IP" + die $LINENO "Failure disassociating address $FLOATING_IP" # Wait just a tick for everything above to complete so release doesn't fail if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then - echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds" - exit 1 + die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds" fi # Release floating address euca-release-address $FLOATING_IP || \ - die "Failure releasing address $FLOATING_IP" + die $LINENO "Failure releasing address $FLOATING_IP" # Wait just a tick for everything above to complete so terminate doesn't fail if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then - echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds" - exit 1 + die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds" fi # Terminate instance euca-terminate-instances $INSTANCE || \ - die "Failure terminating instance $INSTANCE" + die $LINENO "Failure terminating instance $INSTANCE" # Assure it has terminated within a reasonable time. The behaviour of this # case changed with bug/836978. Requesting the status of an invalid instance # will now return an error message including the instance id, so we need to # filter that out. if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then - echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" - exit 1 + die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds" fi # Delete secgroup -euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" +euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 34ab69d988..b4e1c423e6 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -71,7 +71,7 @@ glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" +die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- @@ -83,8 +83,7 @@ nova secgroup-list if ! nova secgroup-list | grep -q $SECGROUP; then nova secgroup-create $SECGROUP "$SECGROUP description" if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 + die $LINENO "Security group not created" fi fi @@ -115,7 +114,7 @@ fi # Clean-up from previous runs nova delete $VM_NAME || true if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - echo "server didn't terminate!" + die $LINENO "server didn't terminate!" 
exit 1 fi @@ -123,17 +122,16 @@ fi # ------------- VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set VM_UUID "Failure launching $VM_NAME" +die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't become active!" - exit 1 + die $LINENO "server didn't become active!" fi # Get the instance IP IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) -die_if_not_set IP "Failure retrieving IP address" +die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT @@ -143,17 +141,16 @@ ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # Allocate a floating IP from the default pool FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) -die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" +die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" # List floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" - exit 1 + die $LINENO "Floating IP not allocated" fi # Add floating IP to our server nova add-floating-ip $VM_UUID $FLOATING_IP || \ - die "Failure adding floating IP $FLOATING_IP to $VM_NAME" + die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME" # Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT @@ -161,18 +158,17 @@ ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT if ! 
is_service_enabled quantum; then # Allocate an IP from second floating pool TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) - die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" + die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" # list floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" - exit 1 + die $LINENO "Floating IP not allocated" fi fi # Dis-allow icmp traffic (ping) nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ - die "Failure deleting security group rule from $SECGROUP" + die $LINENO "Failure deleting security group rule from $SECGROUP" # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then @@ -186,24 +182,23 @@ fi if ! is_service_enabled quantum; then # Delete second floating IP nova floating-ip-delete $TEST_FLOATING_IP || \ - die "Failure deleting floating IP $TEST_FLOATING_IP" + die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP" fi # Delete the floating ip nova floating-ip-delete $FLOATING_IP || \ - die "Failure deleting floating IP $FLOATING_IP" + die $LINENO "Failure deleting floating IP $FLOATING_IP" # Delete instance -nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" +nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" # Wait for termination if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $VM_NAME not deleted" - exit 1 + die $LINENO "Server $VM_NAME not deleted" fi # Delete secgroup nova secgroup-delete $SECGROUP || \ - die "Failure deleting security group $SECGROUP" + die $LINENO "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/horizon.sh b/exercises/horizon.sh index c5dae3ab64..5d778c9899 100755 --- a/exercises/horizon.sh +++ b/exercises/horizon.sh @@ -36,7 +36,7 @@ source $TOP_DIR/exerciserc is_service_enabled horizon || exit 55 # can we get the front page -curl http://$SERVICE_HOST 2>/dev/null | grep -q '

Log In

' || die "Horizon front page not functioning!" +curl http://$SERVICE_HOST 2>/dev/null | grep -q '

Log In

' || die $LINENO "Horizon front page not functioning!" set +o xtrace echo "*********************************************************************" diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index bc33fe8279..5c4b16ea5e 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -264,7 +264,7 @@ function create_vm { --image $(get_image_id) \ $NIC \ $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` - die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID + die_if_not_set $LINENO VM_UUID "Failure launching $TENANT-server$NUM" confirm_server_active $VM_UUID } @@ -309,8 +309,7 @@ function shutdown_vm { function shutdown_vms { foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%' if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then - echo "Some VMs failed to shutdown" - false + die $LINENO "Some VMs failed to shutdown" fi } diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index a33c9c6343..b73afdfd09 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -68,7 +68,7 @@ done # Delete secgroup nova secgroup-delete $SEC_GROUP_NAME || \ - die "Failure deleting security group $SEC_GROUP_NAME" + die $LINENO "Failure deleting security group $SEC_GROUP_NAME" set +o xtrace echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh index a75f955a2e..46ac2c5c06 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -45,20 +45,20 @@ CONTAINER=ex-swift # ============= # Check if we have to swift via keystone -swift stat || die "Failure geting status" +swift stat || die $LINENO "Failure geting status" # We start by creating a test container -swift post $CONTAINER || die "Failure creating container $CONTAINER" +swift post $CONTAINER || die $LINENO "Failure creating container $CONTAINER" # add some files into it. 
-swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER" +swift upload $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER" # list them -swift list $CONTAINER || die "Failure listing contents of container $CONTAINER" +swift list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER" # And we may want to delete them now that we have tested that # everything works. -swift delete $CONTAINER || die "Failure deleting container $CONTAINER" +swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER" set +o xtrace echo "*********************************************************************" diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 45cb0c8ed4..79136411ac 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -70,7 +70,7 @@ glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) -die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" +die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- @@ -114,25 +114,23 @@ fi # Clean-up from previous runs nova delete $VM_NAME || true if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - echo "server didn't terminate!" - exit 1 + die $LINENO "server didn't terminate!" fi # Boot instance # ------------- VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set VM_UUID "Failure launching $VM_NAME" +die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't become active!" - exit 1 + die $LINENO "server didn't become active!" 
fi # Get the instance IP IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) -die_if_not_set IP "Failure retrieving IP address" +die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT @@ -142,42 +140,38 @@ ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # Verify it doesn't exist if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then - echo "Volume $VOL_NAME already exists" - exit 1 + die $LINENO "Volume $VOL_NAME already exists" fi # Create a new volume start_time=$(date +%s) cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ - die "Failure creating volume $VOL_NAME" + die $LINENO "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" - exit 1 + die $LINENO "Volume $VOL_NAME not created" fi end_time=$(date +%s) echo "Completed cinder create in $((end_time - start_time)) seconds" # Get volume ID VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) -die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" +die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server DEVICE=/dev/vdb start_time=$(date +%s) nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ - die "Failure attaching volume $VOL_NAME to $VM_NAME" + die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - echo "Volume $VOL_NAME not attached to $VM_NAME" - exit 1 + die $LINENO "Volume $VOL_NAME not attached to $VM_NAME" fi end_time=$(date +%s) echo "Completed volume-attach in $((end_time - start_time)) seconds" VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) -die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" +die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status" if [[ "$VOL_ATTACH" != $VM_UUID ]]; then - echo "Volume not attached to correct instance" - exit 1 + die $LINENO "Volume not attached to correct instance" fi # Clean up @@ -185,33 +179,30 @@ fi # Detach volume start_time=$(date +%s) -nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME" +nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not detached from $VM_NAME" - exit 1 + die $LINENO "Volume $VOL_NAME not detached from $VM_NAME" fi end_time=$(date +%s) echo "Completed volume-detach in $((end_time - start_time)) seconds" # Delete volume start_time=$(date +%s) -cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" +cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then - echo "Volume $VOL_NAME not deleted" - exit 1 + die $LINENO "Volume $VOL_NAME not deleted" fi end_time=$(date +%s) echo "Completed cinder delete in $((end_time - start_time)) seconds" # Delete instance -nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" +nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $VM_NAME not deleted" - exit 1 + die $LINENO "Server $VM_NAME not deleted" fi # Delete secgroup -nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/functions b/functions index b94c611446..459aedd34c 100644 --- a/functions +++ b/functions @@ -57,8 +57,15 @@ function cp_it { # die "message" function die() { local exitcode=$? + if [ $exitcode == 0 ]; then + exitcode=1 + fi set +o xtrace - echo $@ + local msg="[ERROR] $0:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi exit $exitcode } @@ -71,10 +78,9 @@ function die_if_not_set() { ( local exitcode=$? set +o xtrace - local evar=$1; shift + local evar=$2; shift if ! is_set $evar || [ $exitcode != 0 ]; then - echo $@ - exit -1 + die $@ fi ) } @@ -406,12 +412,10 @@ function exit_distro_not_supported { fi if [ $# -gt 0 ]; then - echo "Support for $DISTRO is incomplete: no support for $@" + die $LINENO "Support for $DISTRO is incomplete: no support for $@" else - echo "Support for $DISTRO is incomplete." + die $LINENO "Support for $DISTRO is incomplete." fi - - exit 1 } @@ -1087,9 +1091,9 @@ function _ping_check_novanet() { fi if ! timeout $boot_timeout sh -c "$check_command"; then if [[ "$expected" = "True" ]]; then - echo "[Fail] Couldn't ping server" + die $LINENO "[Fail] Couldn't ping server" else - echo "[Fail] Could ping server" + die $LINENO "[Fail] Could ping server" fi exit 1 fi @@ -1113,8 +1117,7 @@ function _ssh_check_novanet() { local ACTIVE_TIMEOUT=$5 local probe_cmd="" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then - echo "server didn't become ssh-able!" - exit 1 + die $LINENO "server didn't become ssh-able!" fi } diff --git a/lib/glance b/lib/glance index 80d3902aab..cbe47fc751 100644 --- a/lib/glance +++ b/lib/glance @@ -187,8 +187,7 @@ function start_glance() { screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then - echo "g-api did not start" - exit 1 + die $LINENO "g-api did not start" fi } diff --git a/lib/keystone b/lib/keystone index a1a57f83bd..25803515f0 100644 --- a/lib/keystone +++ b/lib/keystone @@ -323,8 +323,7 @@ function start_keystone() { screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then - echo "keystone did not start" - exit 1 + die $LINENO "keystone did not start" fi # Start proxies if enabled diff --git a/lib/nova b/lib/nova index 849ec5730d..374979089d 100644 --- a/lib/nova +++ b/lib/nova @@ -542,8 +542,7 @@ function start_nova_api() { screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." if ! 
wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then - echo "nova-api did not start" - exit 1 + die $LINENO "nova-api did not start" fi # Start proxies if enabled diff --git a/lib/quantum b/lib/quantum index 0fef1aebd0..9068f62a42 100644 --- a/lib/quantum +++ b/lib/quantum @@ -351,8 +351,7 @@ function start_quantum_service_and_check() { screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" echo "Waiting for Quantum to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then - echo "Quantum did not start" - exit 1 + die $LINENO "Quantum did not start" fi } @@ -396,8 +395,7 @@ function _configure_quantum_common() { quantum_plugin_configure_common if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then - echo "Quantum plugin not set.. exiting" - exit 1 + die $LINENO "Quantum plugin not set.. exiting" fi # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` @@ -511,8 +509,7 @@ function _configure_quantum_service() { if is_service_enabled $DATABASE_BACKENDS; then recreate_database $Q_DB_NAME utf8 else - echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." - exit 1 + die $LINENO "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." fi # Update either configuration file with plugin @@ -661,11 +658,10 @@ function _ping_check_quantum() { fi if ! timeout $timeout_sec sh -c "$check_command"; then if [[ "$expected" = "True" ]]; then - echo "[Fail] Couldn't ping server" + die $LINENO "[Fail] Couldn't ping server" else - echo "[Fail] Could ping server" + die $LINENO "[Fail] Could ping server" fi - exit 1 fi } @@ -679,8 +675,7 @@ function _ssh_check_quantum() { local probe_cmd = "" probe_cmd=`_get_probe_cmd_prefix $from_net` if ! timeout $timeout_sec sh -c "while ! 
$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then - echo "server didn't become ssh-able!" - exit 1 + die $LINENO "server didn't become ssh-able!" fi } diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index bc9a36f299..8c150b11f5 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -19,8 +19,7 @@ function setup_integration_bridge() { conn=(${NVP_CONTROLLER_CONNECTION//\:/ }) OVS_MGR_IP=${conn[0]} else - echo "Error - No controller specified. Unable to set a manager for OVS" - exit 1 + die $LINENO "No controller specified. Unable to set a manager for OVS" fi sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP } @@ -63,14 +62,12 @@ function quantum_plugin_configure_dhcp_agent() { function quantum_plugin_configure_l3_agent() { # Nicira plugin does not run L3 agent - echo "ERROR - q-l3 should must not be executed with Nicira plugin!" - exit 1 + die $LINENO "q-l3 must not be executed with Nicira plugin!" } function quantum_plugin_configure_plugin_agent() { # Nicira plugin does not run L2 agent - echo "ERROR - q-agt must not be executed with Nicira plugin!" - exit 1 + die $LINENO "q-agt must not be executed with Nicira plugin!" } function quantum_plugin_configure_service() { @@ -93,8 +90,7 @@ function quantum_plugin_configure_service() { if [[ "$DEFAULT_TZ_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_tz_uuid $DEFAULT_TZ_UUID else - echo "ERROR - The nicira plugin won't work without a default transport zone." - exit 1 + die $LINENO "The nicira plugin won't work without a default transport zone." 
fi if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID @@ -114,8 +110,7 @@ function quantum_plugin_configure_service() { # Only 1 controller can be specified in this case iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controller_connection $NVP_CONTROLLER_CONNECTION else - echo "ERROR - The nicira plugin needs at least an NVP controller." - exit 1 + die $LINENO "The nicira plugin needs at least an NVP controller." fi if [[ "$NVP_USER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_user $NVP_USER diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index 726c6c3ed5..288fa69bf2 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -49,9 +49,7 @@ function quantum_plugin_configure_plugin_agent() { # REVISIT - also check kernel module support for GRE and patch ports OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then - echo "You are running OVS version $OVS_VERSION." - echo "OVS 1.4+ is required for tunneling between multiple hosts." - exit 1 + die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." fi iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP diff --git a/lib/rpc_backend b/lib/rpc_backend index f35f9dbd57..7ea71ee130 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -39,8 +39,7 @@ function check_rpc_backend() { fi if is_service_enabled qpid && ! qpid_is_supported; then - echo "Qpid support is not available for this version of your distribution." - exit 1 + die $LINENO "Qpid support is not available for this version of your distribution." 
fi } diff --git a/stack.sh b/stack.sh index a4106e51e8..a1af00b276 100755 --- a/stack.sh +++ b/stack.sh @@ -55,8 +55,7 @@ GetDistro # allow you to safely override those settings. if [[ ! -r $TOP_DIR/stackrc ]]; then - echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" - exit 1 + log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" fi source $TOP_DIR/stackrc @@ -93,8 +92,7 @@ disable_negated_services if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then - echo "If you wish to run this script anyway run with FORCE=yes" - exit 1 + die $LINENO "If you wish to run this script anyway run with FORCE=yes" fi fi @@ -105,16 +103,14 @@ check_rpc_backend # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! -d $TOP_DIR/lib ]; then - echo "ERROR: missing devstack/lib" - exit 1 + log_error $LINENO "missing devstack/lib" fi # ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config # templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then - echo "ERROR: missing devstack/files" - exit 1 + log_error $LINENO "missing devstack/files" fi SCREEN_NAME=${SCREEN_NAME:-stack} @@ -248,9 +244,7 @@ if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then fi done if [ "$HOST_IP" == "" ]; then - echo "Could not determine host ip address." - echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" - exit 1 + die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" fi fi