tox verification and tarball venv packaging support.

Optionally run tox on each git repo (-t) and package the resultant
venv into a tarball on success (-p).

The tarball filename includes the short SHA-1 of the repo.

If a tarball is created, screen is not run.

If the -d option is given, the command-line incantation for kicking
off the ansible deploy playbooks is printed. Later, this
could be automated.

A MySQL role/playbook is also included for a single-node db.

An stv3-api role is also included.

Change-Id: Ib9f97a0035c228d4a163d7794ab7ce60043d572b
Sandy Walsh 2014-10-21 13:39:55 -07:00
parent 678d1449ac
commit 24f118f9ac
29 changed files with 1152 additions and 6 deletions

ansible/README.md (new file)
@@ -0,0 +1,36 @@
stv3-config
==============
Configuration playbooks for StackTach.v3 deployments.

Assumes an inventory that has nodes or groups whose names start with
"stv3-api" or "stv3-workers". Execution would look like:

```bash
ansible-playbook workers.yaml
```

Assumes a stv3-db setup already exists.

There are also roles for the database and API. The `common` role is
responsible for installing the tarball and creating the necessary
user/group accounts. Both the API and workers depend on the `common`
role, since they both require the codebase and the winchester
configuration files.

What it does
------------
* Creates `stv3` user and `stv3` group
* Creates `/etc/stv3` directory for configuration data
* Creates `/var/run/stv3` directory for pid files
* Creates `/var/log/stv3` directory for log files
* Copies config files to `/etc/stv3`
* Copies init.d files to `/etc/init.d` for yagi-events and pipeline-worker
* Copies and expands the StackTach.v3 tarball to `/opt/stv3`
* Starts the yagi worker daemon and the winchester worker
  (yagi-events and pipeline-worker, respectively)

The init.d files handle .pid file creation and running as the stv3 user.
While yagi-events and pipeline-worker are capable of running daemonized,
we don't use that code. Instead, we let the init.d scripts handle the
backgrounding and process management.

The connection from the host machine to the target machine has to have a
secure account already created for ansible to run. Currently it assumes
an account called `stacktach` with root capabilities. When the daemons
run, they run as `stv3`, which is just a service account.
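
The repo doesn't ship an inventory; a minimal sketch of what the plays
assume might look like this (hostnames are hypothetical):

```ini
# inventory - groups whose names match the plays' hosts: patterns
[stv3-api]
api01.example.com

[stv3-workers]
worker01.example.com
worker02.example.com

[stv3-db]
db01.example.com
```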

ansible/api.yaml (new file)
@@ -0,0 +1,21 @@
## Main entry point for the StackTach.v3 worker deployments
##
## Assumes an inventory value that has nodes or groups that start with
## "stv3-api"
##
## Execution would look like:
## ansible-playbook api.yaml -vv
##
## Assumes a stv3-db setup already exists.
---
- name: StackTach API Configuration
  hosts: stv3-api
  remote_user: stacktach
  sudo: yes
  vars_files:
    - ["vars/local_settings.yaml"]
  roles:
    - common
    - stv3-api

ansible/database.yaml (new file)
@@ -0,0 +1,19 @@
## Main entry point for the StackTach.v3 db master deployments
##
## Assumes an inventory value that has nodes or groups that start with
## "stv3-db"
##
## Execution would look like:
## ansible-playbook database.yaml
##
---
- name: StackTach DB Master Configurations
  hosts: stv3-db
  remote_user: stacktach
  sudo: yes
  vars_files:
    - ["vars/local_settings.yaml"]
  roles:
    - stv3-db

@@ -0,0 +1,31 @@
## Lowest level config defaults for the common role

# used in winchester.yaml
config_path:
  - /etc/stv3

# used in logging.conf
root_log_level: WARNING
yagi_log_level: INFO
winchester_log_level: DEBUG
amqp_log_level: WARNING

# "level = INFO" logs SQL queries.
# "level = DEBUG" logs SQL queries and results.
# "level = WARNING" logs neither. Recommended for production systems.
sqlalchemy_log_level: WARNING

database_url: mysql://winchester:testpasswd@localhost/winchester

statistics_period: 10
pipeline_worker_batch_size: 1000
pipeline_worker_delay: 10

#stackdistiller_plugins:
#  - my_plugin_1
#  - my_plugin_2

catch_all_notifications: false

pipeline_handlers:
  logger: winchester.pipeline_handler:LoggingHandler
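
The pipeline_handlers map binds a short name to a module:Class path;
pipelines.yaml then refers to handlers by the short name. A hedged
sketch of wiring in a second name (it simply reuses LoggingHandler for
illustration; any importable handler class would do):

```yaml
pipeline_handlers:
  logger: winchester.pipeline_handler:LoggingHandler
  audit_logger: winchester.pipeline_handler:LoggingHandler

# pipelines.yaml could then route a pipeline through it:
# test_pipeline:
#   - audit_logger
```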

@@ -0,0 +1,36 @@
---
- event_type: compute.*
  traits: &instance_traits
    tenant_id:
      fields: payload.tenant_id
    user_id:
      fields: payload.user_id
    instance_id:
      fields: payload.instance_id
    host:
      fields: publisher_id
      plugin:
        name: split
        parameters:
          segment: 1
          max_split: 1
    service:
      fields: publisher_id
      plugin: split
    state:
      fields: payload.state
    launched_at:
      type: datetime
      fields: payload.launched_at
    deleted_at:
      type: datetime
      fields: payload.deleted_at
- event_type: compute.instance.exists
  traits:
    <<: *instance_traits
    audit_period_beginning:
      type: datetime
      fields: payload.audit_period_beginning
    audit_period_ending:
      type: datetime
      fields: payload.audit_period_ending
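
The &instance_traits / <<: *instance_traits pair above is plain YAML:
an anchor plus a merge key, so compute.instance.exists inherits every
trait defined for compute.* and only adds the audit-period traits. A
minimal standalone illustration:

```yaml
base: &defaults
  a: 1
  b: 2
derived:
  <<: *defaults   # pulls in a and b
  c: 3            # adds one more key
```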

@@ -0,0 +1,89 @@
- event_type: compute.instance.*
  traits: &instance_traits
    tenant_id:
      fields:
        - payload.tenant_id
        - _context_project_id
    user_id:
      fields: payload.user_id
    request_id:
      fields: _context_request_id
    message:
      fields: payload.message
    instance_id:
      fields:
        - payload.instance_uuid
        - payload.instance_id
        - exception.kwargs.uuid
        - instance.uuid
    host:
      fields: publisher_id
      plugin:
        name: split
        parameters:
          segment: 1
          max_split: 1
    service:
      fields: publisher_id
      plugin: split
    instance_flavor:
      fields:
        - payload.instance_type
        - payload.image_meta.instance_type_name
        - payload.image_meta.instance_type_flavorid
    instance_flavor_id:
      type: int
      fields:
        - payload.instance_flavor_id
    memory_mb:
      type: int
      fields: payload.memory_mb
    disk_gb:
      type: int
      fields: payload.disk_gb
    root_gb:
      type: int
      fields: payload.root_gb
    ephemeral_gb:
      type: int
      fields: payload.ephemeral_gb
    vcpus:
      type: int
      fields: payload.vcpus
    state:
      fields: payload.state
    os_architecture:
      fields: payload.image_meta.'org.openstack__1__architecture'
    os_version:
      fields: payload.image_meta.'org.openstack__1__os_version'
    os_distro:
      fields: payload.image_meta.'org.openstack__1__os_distro'
    launched_at:
      type: datetime
      fields: payload.launched_at
    deleted_at:
      type: datetime
      fields:
        - payload.deleted_at
        - payload.terminated_at
- event_type: compute.instance.exists
  traits:
    <<: *instance_traits
    audit_period_beginning:
      type: datetime
      fields: payload.audit_period_beginning
    audit_period_ending:
      type: datetime
      fields: payload.audit_period_ending
- event_type: snapshot_instance
  traits:
    <<: *instance_traits
- event_type: scheduler.run_instance.*
  traits:
    <<: *instance_traits
- event_type: keypair.import.*
  traits:
    <<: *instance_traits
- event_type: rebuild_instance
  traits:
    <<: *instance_traits

@@ -0,0 +1,5 @@
---
test_pipeline:
  - logger
test_expire_pipeline:
  - logger

@@ -0,0 +1,23 @@
---
- name: test_trigger
  debug_level: 2
  distinguished_by:
    - instance_id
    - timestamp: "day"
  expiration: "$last + 1h"
  fire_pipeline: "test_pipeline"
  expire_pipeline: "test_expire_pipeline"
  match_criteria:
    - event_type:
        - compute.instance.*
        - "!compute.instance.exists"
      #### Traits are optional.
      # traits:
      #   os_distro: ubuntu
      #   memory_mb:
      #     numeric: "> 4096"
    - event_type: compute.instance.exists
      map_distingushed_trait:
        timestamp: audit_period_beginning
  fire_criteria:
    - event_type: compute.instance.exists

@@ -0,0 +1,76 @@
## Main task file for common role
---
- name: stv3 group
  group: name=stv3 system=yes state=present

- name: stv3 user
  user: name=stv3 group=stv3 createhome=no system=yes state=present

- name: stv3 config directory
  file: path=/etc/stv3 state=directory owner=stv3 group=stv3
        mode=0755

- name: stv3 /var/run directory for pid
  file: path=/var/run/stv3 state=directory owner=stv3 group=stv3
        mode=0755

- name: stv3 /var/log directory for logfiles
  file: path=/var/log/stv3 state=directory owner=stv3 group=stv3
        mode=0755

- name: install unzip
  apt: name=unzip update_cache=yes

- name: install mysql-client
  apt: name=mysql-client update_cache=yes

- name: move tarball
  unarchive: src={{tarball_absolute_path}} dest=/opt
             owner=stv3 group=stv3

- name: fix source code ownership
  file: path=/opt/stv3 owner=stv3 group=stv3 recurse=yes

- name: winchester.yaml
  template: src=winchester.yaml.j2 dest=/etc/stv3/winchester.yaml owner=stv3
            group=stv3 mode=0644
  notify:
    - restart yagi-event
    - restart pipeline-worker
    - restart stv3-api

- name: distiller.conf
  copy: src=distiller.conf dest=/etc/stv3/distiller.conf
        owner=stv3 group=stv3 mode=0644
  notify:
    - restart yagi-event
    - restart pipeline-worker

- name: pipelines.yaml
  copy: src=pipelines.yaml dest=/etc/stv3/pipelines.yaml
        owner=stv3 group=stv3 mode=0644
  notify:
    - restart yagi-event
    - restart pipeline-worker

- name: triggers.yaml
  copy: src=triggers.yaml dest=/etc/stv3/triggers.yaml
        owner=stv3 group=stv3 mode=0644
  notify:
    - restart yagi-event
    - restart pipeline-worker

- name: event_definitions.yaml
  copy: src=event_definitions.yaml dest=/etc/stv3/event_definitions.yaml
        owner=stv3 group=stv3 mode=0644
  notify:
    - restart yagi-event
    - restart pipeline-worker

- name: logging.conf
  template: src=logging.conf.j2 dest=/etc/stv3/logging.conf owner=stv3
            group=stv3 mode=0644
  notify:
    - restart yagi-event
    - restart pipeline-worker

@@ -0,0 +1,68 @@
# Machine-generated by ansible - do not edit!
[loggers]
keys = root, yagi, winchester, amqplib, sqlalchemy

[handlers]
keys = stderr, stdout, null

[formatters]
keys = yagi, default

[logger_root]
level = {{ root_log_level }}
handlers = null

[logger_yagi]
level = {{ yagi_log_level }}
handlers = stderr
qualname = yagi

[logger_winchester]
level = {{ winchester_log_level }}
handlers = stderr
qualname = winchester

[logger_amqplib]
level = {{ amqp_log_level }}
handlers = stderr
qualname = amqplib

[logger_sqlalchemy]
# yagi does not use sqlalchemy... yet. -mdragon
level = {{ sqlalchemy_log_level }}
handlers = stderr
qualname = sqlalchemy

[handler_stderr]
class = StreamHandler
args = (sys.stderr,)
formatter = yagi

[handler_stdout]
class = StreamHandler
args = (sys.stdout,)
formatter = yagi

[handler_watchedfile]
class = handlers.WatchedFileHandler
args = ()
formatter = yagi

[handler_syslog]
class = handlers.SysLogHandler
args = ('/var/log/stv3', handlers.SysLogHandler.LOG_USER)
formatter = yagi

[handler_null]
class = NullHandler
formatter = default
args = ()

[formatter_yagi]
# substitutions available for formats are documented at:
# https://docs.python.org/2/library/logging.html#logrecord-attributes
format = %(name)s[%(levelname)s at %(asctime)s line: %(lineno)d] %(message)s

[formatter_default]
format = %(message)s

@@ -0,0 +1,38 @@
---
# Machine generated via ansible - do not edit!
###### This adds directories to the search path for other config files.
config_path:
{% for path in config_path %}
  - {{ path }}
{% endfor %}

###### logging
logging_config: /etc/stv3/logging.conf

###### How often to log stats
statistics_period: {{ statistics_period }}

pipeline_worker_batch_size: {{ pipeline_worker_batch_size }}
pipeline_worker_delay: {{ pipeline_worker_delay }}

{% if stackdistiller_plugins is defined %}
distiller_trait_plugins:
{% for plugin in stackdistiller_plugins %}
  - {{ plugin }}
{% endfor %}
{% endif %}

catch_all_notifications: {{ catch_all_notifications }}

database:
  url: {{ database_url }}

distiller_config: /etc/stv3/event_definitions.yaml
trigger_definitions: /etc/stv3/triggers.yaml
pipeline_config: /etc/stv3/pipelines.yaml

pipeline_handlers:
{% for key, value in pipeline_handlers.iteritems() %}
  {{ key }}: {{ value }}
{% endfor %}
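
For reference, with the defaults from the common role above, the
rendered /etc/stv3/winchester.yaml would come out roughly like this
(a sketch; exact spacing and boolean casing depend on Jinja settings):

```yaml
config_path:
  - /etc/stv3
logging_config: /etc/stv3/logging.conf
statistics_period: 10
pipeline_worker_batch_size: 1000
pipeline_worker_delay: 10
catch_all_notifications: False
database:
  url: mysql://winchester:testpasswd@localhost/winchester
distiller_config: /etc/stv3/event_definitions.yaml
trigger_definitions: /etc/stv3/triggers.yaml
pipeline_config: /etc/stv3/pipelines.yaml
pipeline_handlers:
  logger: winchester.pipeline_handler:LoggingHandler
```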

@@ -0,0 +1,7 @@
[global]
enabled_versions=1,2
v1_impl=quince.v1_impl:Impl
v2_impl=quince.v2_impl:Impl

[quince]
winchester_config=/etc/stv3/winchester.yaml

@@ -0,0 +1,99 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: Gunicorn HTTP service
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Gunicorn HTTP service.
# Description: Gunicorn HTTP service.
### END INIT INFO
# Author: Sandy Walsh <sandy.walsh@rackspace.com>
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/opt/stv3/bin:/sbin:/usr/sbin:/bin:/usr/bin
DESC="Gunicorn service control"
NAME=gunicorn
DAEMON=/opt/stv3/bin/$NAME
DAEMON_ARGS="--log-file=/var/log/stv3/gunicorn.log quincy.api:get_api(config_location=\"/etc/stv3/quincy.conf\")"
PIDFILE=/var/run/stv3/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
USER=stv3
GROUP=stv3
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
VERBOSE=yes
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
    . /opt/stv3/bin/activate
    start-stop-daemon --start --name ${NAME} --chdir /var/run/stv3 \
        --chuid ${USER}:${GROUP} --background \
        --make-pidfile --pidfile ${PIDFILE} \
        --exec ${DAEMON} -- ${DAEMON_ARGS}
}

#
# Function that stops the daemon/service
#
do_stop()
{
    . /opt/stv3/bin/activate
    log_daemon_msg "Stopping ${DAEMON}... " ${DAEMON}
    start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} --retry=TERM/30/KILL/5
    log_end_msg $?
}

case "$1" in
  start)
    [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  stop)
    [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  status)
    status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    do_start
    ;;
  *)
    #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac

:
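
Once the stv3-api role installs this script as /etc/init.d/stv3-api
(see its tasks below), the service is driven like any LSB init script;
for example:

```bash
sudo service stv3-api start
sudo service stv3-api status
sudo service stv3-api restart
```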

@@ -0,0 +1,3 @@
- name: restart stv3-api
  service: name=stv3-api state=restarted
  when: not stv3-api|changed

@@ -0,0 +1,19 @@
## Main task file for stv3-api role
# All the users, groups, directories and code are
# set up by the common role.
---
- name: quincy.conf
  copy: src=quincy.conf dest=/etc/stv3/quincy.conf
        owner=stv3 group=stv3 mode=0644
  notify:
    - restart stv3-api

- name: stv3-api-initd
  copy: src=stv3-api.debian.init.d dest=/etc/init.d/stv3-api
        owner=stv3 group=stv3 mode=0755

- name: stv3-api
  debug: msg="Starting stv3-api"
  notify:
    - restart stv3-api

@@ -0,0 +1,2 @@
root_db_password: password
winchester_db_password: testpasswd

@@ -0,0 +1,25 @@
## Main task file for stv3-db role
---
- name: install mysql-server
  apt: name=mysql-server update_cache=yes

- name: install python mysql lib
  apt: name=python-mysqldb

- name: ensure mysql is running and starts on boot
  service: name=mysql state=started enabled=true

# Need to do this for idempotency, see
# http://ansible.cc/docs/modules.html#mysql-user
- name: update mysql root password for all root accounts
  mysql_user: name=root host=localhost password={{ root_db_password }}
  # tricky ... could already have been changed by a previous run?
  ignore_errors: yes

- name: copy .my.cnf file with root password credentials
  template: src=my.cnf.j2 dest=/root/.my.cnf owner=root mode=0600

- name: create winchester database
  mysql_db: name=winchester state=present login_user=root login_password={{ root_db_password }}

- name: create winchester db user
  mysql_user: name=winchester host=localhost password={{ winchester_db_password }} priv=*.*:ALL
              login_user=root login_password={{ root_db_password }}
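
A quick hedged smoke test after the play runs (the passwords come from
the role vars above; adjust if you've overridden them):

```bash
mysql -u winchester -ptestpasswd -h localhost \
      -e 'SHOW DATABASES LIKE "winchester";'
```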

@@ -0,0 +1,127 @@
#
# The MySQL database server configuration file.
#
# You can copy this to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
# Here are entries for some specific programs
# The following values assume you have at least 32M ram
# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
#
# * Basic Settings
#
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
bind-address = 127.0.0.1
#
# * Fine Tuning
#
key_buffer = 16M
max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 8
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
#max_connections = 100
#table_cache = 64
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_limit = 1M
query_cache_size = 16M
#
# * Logging and Replication
#
# Both locations get rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1
#
# Error log - should be very few entries.
#
log_error = /var/log/mysql/error.log
#
# Here you can see queries with especially long duration
log_slow_queries = /var/log/mysql/mysql-slow.log
long_query_time = 2
log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
#server-id = 1
#log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 100M
#binlog_do_db = include_database_name
#binlog_ignore_db = include_database_name
#
# * InnoDB
#
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem
[mysqldump]
quick
quote-names
max_allowed_packet = 16M
[mysql]
#no-auto-rehash # faster start of mysql but no tab completion
[isamchk]
key_buffer = 16M
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/

@@ -0,0 +1,14 @@
## Lowest level config defaults for the stv3-worker role
# used in yagi.conf
rabbit_host: localhost
rabbit_user: guest
rabbit_password: password
rabbit_port: 5672
rabbit_vhost: /
rabbit_exchange: nova

# shoebox credentials ...
swift_region: DFW
swift_username: my_username
rax_api_key: my_api_key

@@ -0,0 +1,100 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: Winchester pipeline worker service
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Winchester pipeline worker daemon control.
# Description: Winchester pipeline worker daemon control.
### END INIT INFO
# Author: Sandy Walsh <sandy.walsh@rackspace.com>
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/opt/stv3/bin:/sbin:/usr/sbin:/bin:/usr/bin
DESC="Winchester pipeline worker control"
NAME=pipeline_worker
DAEMON=/opt/stv3/bin/$NAME
DAEMON_ARGS="--config /etc/stv3/winchester.yaml"
PIDFILE=/var/run/stv3/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
USER=stv3
GROUP=stv3
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
VERBOSE=yes
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
    . /opt/stv3/bin/activate
    winchester_db -c /etc/stv3/winchester.yaml upgrade head
    start-stop-daemon --start --name ${NAME} --chdir /var/run/stv3 \
        --chuid ${USER}:${GROUP} --background \
        --make-pidfile --pidfile ${PIDFILE} \
        --exec ${DAEMON} -- ${DAEMON_ARGS}
}

#
# Function that stops the daemon/service
#
do_stop()
{
    . /opt/stv3/bin/activate
    log_daemon_msg "Stopping ${DAEMON}... " ${DAEMON}
    start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} --retry=TERM/30/KILL/5
    log_end_msg $?
}

case "$1" in
  start)
    [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  stop)
    [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  status)
    status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    do_start
    ;;
  *)
    #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac

:

@@ -0,0 +1,100 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: YAGI worker service
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: YAGI worker daemon control.
# Description: YAGI worker daemon control.
### END INIT INFO
# Author: Sandy Walsh <sandy.walsh@rackspace.com>
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/opt/stv3/bin:/sbin:/usr/sbin:/bin:/usr/bin
DESC="YAGI worker control"
NAME=yagi-event
DAEMON=/opt/stv3/bin/$NAME
DAEMON_ARGS="--config /etc/stv3/yagi.conf"
PIDFILE=/var/run/stv3/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
USER=stv3
GROUP=stv3
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
VERBOSE=yes
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
    . /opt/stv3/bin/activate
    winchester_db -c /etc/stv3/winchester.yaml upgrade head
    start-stop-daemon --start --name ${NAME} --chdir /var/run/stv3 \
        --chuid ${USER}:${GROUP} --background \
        --make-pidfile --pidfile ${PIDFILE} \
        --exec ${DAEMON} -- ${DAEMON_ARGS}
}

#
# Function that stops the daemon/service
#
do_stop()
{
    . /opt/stv3/bin/activate
    log_daemon_msg "Stopping ${DAEMON}... " ${DAEMON}
    start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} --retry=TERM/30/KILL/5
    log_end_msg $?
}

case "$1" in
  start)
    [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  stop)
    [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  status)
    status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;
  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    do_start
    ;;
  *)
    #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac

:

@@ -0,0 +1,9 @@
# Only restart stv3-worker if we didn't already go from stopped
# to started by the stv3 worker process task
- name: restart yagi-event
  service: name=yagi-event state=restarted
  when: not yagi-event|changed

- name: restart pipeline-worker
  service: name=pipeline_worker state=restarted
  when: not pipeline_worker|changed

@@ -0,0 +1,35 @@
## Main task file for stv3-workers role
# All the users, groups, directories and code are
# set up by the common role.
---
- name: yagi.conf
  template: src=yagi.conf.j2 dest=/etc/stv3/yagi.conf
            owner=stv3 group=stv3 mode=0644
  notify:
    - restart yagi-event

- name: swift_credentials.conf
  template: src=swift_credentials.conf.j2 dest=/etc/stv3/swift_credentials.conf
            owner=stv3 group=stv3 mode=0644
  notify:
    - restart yagi-event

- name: yagi-event-initd
  copy: src=yagi-event.debian.init.d dest=/etc/init.d/yagi-event
        owner=stv3 group=stv3 mode=0755

- name: pipeline-worker-initd
  copy: src=pipeline-worker.debian.init.d
        dest=/etc/init.d/pipeline_worker
        owner=stv3 group=stv3 mode=0755

- name: yagi-event
  debug: msg="Starting yagi-event"
  notify:
    - restart yagi-event

- name: pipeline-worker
  debug: msg="Starting pipeline-worker"
  notify:
    - restart pipeline-worker

@@ -0,0 +1,4 @@
[rackspace_cloud]
username = {{ swift_username }}
api_key = {{ rax_api_key }}

@@ -0,0 +1,75 @@
[global]
verbose = True
debug = True
update_timer = 10

[event_worker]
pidfile = yagi_event_worker.pid
daemonize = False
event_driver = yagi.broker.rabbit.Broker

[rabbit_broker]
host = {{ rabbit_host }}
user = {{ rabbit_user }}
password = {{ rabbit_password }}
port = {{ rabbit_port }}
vhost = {{ rabbit_vhost }}
poll_delay = 1
exchange_name = "{{ rabbit_exchange }}"

[logging]
logfile = /var/log/stv3/yagi.log
default_level = {{ yagi_log_level }}
#config_file = /etc/stv3/logging.conf

[consumers]
queues = monitor.info

[consumer:monitor.info]
#apps = yagi.handler.shoebox_handler.ShoeboxHandler
apps = winchester.yagi_handler.WinchesterHandler
exchange = monitor
exchange_type = topic
routing_key = monitor.info
durable = True
max_messages = 100

[filters]
cufpub = compute.instance.exists.verified,compute.instance.exists

[nova]
nova_flavor_field_name = instance_type_id

[oahu]
config_class = .|oahu_config:Config

[winchester]
config_file = /etc/stv3/winchester.yaml

[shoebox]
# Store in-process files in ./working
# Move them to ./archive when full via the MoveFileCallback
# Roll files every 1mb
working_directory=data/working
destination_folder=data/archive
filename_template=events_%Y_%m_%d_%X_%f.dat
roll_checker=shoebox.roll_checker:SizeRollChecker
roll_size_mb=1
distiller_conf=distiller.conf
# Swift upload support
# create a credentials file (see shoebox/bin/sample_credentials.conf)
callback=shoebox.handlers:CallbackList
callback_list=shoebox.handlers:MoveFileCallback, shoebox.handlers:SwiftUploadCallback, shoebox.handlers:DeleteFileCallback
container=sandbox
credentials_file=swift_credentials.conf
auth_method=rackspace
region={{ swift_region }}
# If you don't want Swift support, comment the above callback=
# entry and uncomment this one:
#callback=shoebox.handlers:MoveFileCallback
# which will just move the file into the archive directory.

ansible/workers.yaml (new file)
@@ -0,0 +1,21 @@
## Main entry point for the StackTach.v3 worker deployments
##
## Assumes an inventory value that has nodes or groups that start with
## "stv3-workers"
##
## Execution would look like:
## ansible-playbook workers.yaml -vv
##
## Assumes a stv3-db setup already exists.
---
- name: StackTach Workers Configurations
  hosts: stv3-workers
  remote_user: stacktach
  sudo: yes
  vars_files:
    - ["vars/local_settings.yaml"]
  roles:
    - common
    - stv3-workers

@@ -2,10 +2,39 @@
echo "StackTach dev env build script"
SOURCE_DIR=git
PACKAGE=false
TOX=false
DEPLOY=false
while getopts pdt opt; do
  case $opt in
    p)
      PACKAGE=true
      ;;
    t)
      TOX=true
      ;;
    d)
      DEPLOY=true
      ;;
  esac
done
shift $((OPTIND - 1))

DEV_DIR=git
PKG_DIR=dist
SOURCE_DIR=$DEV_DIR
VENV_DIR=.venv
PIPELINE_ENGINE=winchester

if [[ "$PACKAGE" = true ]]
then
  SOURCE_DIR=$PKG_DIR
  rm -rf $PKG_DIR
  rm -rf $VENV_DIR
fi

if [[ -f local.sh ]]; then
  source local.sh
fi
@@ -30,6 +59,20 @@ for file in StackTach/notabene rackerlabs/yagi
do
git clone https://github.com/$file
done
if [[ "$TOX" = true ]]
then
for file in shoebox simport notification-utils \
stackdistiller winchester
do
cd stacktach-$file
set -e
tox
set +e
cd ..
done
fi
cd ..
source ./$VENV_DIR/bin/activate
@@ -59,4 +102,26 @@ then
winchester_db -c winchester.yaml upgrade head
fi
if [[ "$PACKAGE" = true ]]
then
SHA=$(git log --pretty=format:'%h' -n 1)
mkdir dist
virtualenv --relocatable $VENV_DIR
mv $VENV_DIR dist/stv3
# Fix up the activate script to new location. --relocatable doesn't handle this.
cd dist/stv3/bin
sed -i "s/VIRTUAL_ENV=\".*\"/VIRTUAL_ENV=\"\/opt\/stv3\"/" activate
cd ../..
tar -zcvf ../stacktachv3_$SHA.tar.gz stv3
cd ..
echo "Release tarball in stacktachv3_$SHA.tar.gz"
if [[ "$DEPLOY" == true ]]
then
echo ansible-playbook db.yaml --extra-vars \"tarball_absolute_path=../stacktachv3_$SHA.tar.gz\" -vvv
echo ansible-playbook workers.yaml --extra-vars \"tarball_absolute_path=../stacktachv3_$SHA.tar.gz\" -vvv
echo ansible-playbook api.yaml --extra-vars \"tarball_absolute_path=../stacktachv3_$SHA.tar.gz\" -vvv
fi
else
screen -c screenrc.$PIPELINE_ENGINE
fi
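
Putting the new flags together (the script's filename isn't shown in
this view; ./build.sh below is a placeholder for it):

```bash
# run tox across the repos, package the venv, and print the deploy incantations
./build.sh -t -p -d
# on success the tarball lands one directory up, e.g. ../stacktachv3_24f118f.tar.gz
```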

@@ -26,10 +26,12 @@
     service:
       fields: publisher_id
       plugin: split
-    flavor_id:
+    instance_flavor_id:
       type: int
       fields:
+        - payload.instance_flavor_id
+        - payload.image_meta.instance_type_flavor_id
         - payload.instance_type_id
     memory_mb:
       type: int
       fields: payload.memory_mb
@@ -45,9 +47,6 @@
     vcpus:
       type: int
       fields: payload.vcpus
-    instance_type_id:
-      type: int
-      fields: payload.instance_type_id
     instance_type:
       fields: payload.instance_type
     state: