Merge branch 'feature/cluster'

Conflicts:
	data_bags/mon_agent/mon_agent.json
Tim Kuhlman 2014-05-27 11:42:58 -06:00
commit 57e1bf5084
20 changed files with 354 additions and 14 deletions

1
.gitignore vendored
View File

@@ -7,3 +7,4 @@ hlinux/templates/packer_cache
 .idea
 *.deb
 *.pyc
+utils/cluster/data_bags/vertica/ssh_key.json

View File

@@ -6,6 +6,6 @@
   "api" : {
     "mon_api_url" : "http://localhost:8080/v2.0",
     "mon_api_project_id" : "82510970543135",
-    "use_keystone" : "false",
+    "use_keystone" : "false"
   }
 }

View File

@@ -1,10 +1,8 @@
 {
   "id": "mon_credentials",
   "middleware": {
-    "keystore_password": "changeit",
     "serverVip": "region-a.geo-1.identity-admin.hpcloudsvc.com",
-    "truststore_password": "changeit",
-    "keystore_file":"hpmiddleware-keystore-production.jks"
+    "truststore_password": "changeit"
   },
   "mysql": {
     "hostname": "localhost",

View File

@@ -4,6 +4,7 @@
   "json_class": "Chef::Role",
   "default_attributes": {
     "kafka": {
+      "listen_interface": "eth1",
       "topics": {
         "metrics": { "replicas": 1, "partitions": 4 },
         "events": { "replicas": 1, "partitions": 4 },

1
utils/__init__.py Normal file
View File

@@ -0,0 +1 @@
__author__ = 'kuhlmant'

View File

@@ -50,6 +50,25 @@ data_bag_path "{dir}/data_bags"'''.format(dir=chef_dir)
     sudo('chef-solo -c {dir}/solo.rb -j {dir}/node.json'.format(dir=chef_dir))
 
 
+@task
+def git_mini_mon(install_dir, branch=None, proxy=None):
+    """Download mini-mon from git
+    """
+    with prefix(proxy_string(proxy)):
+        # Update the install dir if it already has code, otherwise check out
+        with settings(hide('running', 'output', 'warnings'), warn_only=True):
+            install_dir_check = run('ls %s' % install_dir)
+        if install_dir_check.succeeded:
+            with cd(install_dir):
+                sudo('git checkout master; git pull -f origin master')
+        else:
+            sudo('git clone https://github.com/hpcloud-mon/mon-vagrant.git %s' % install_dir)
+        if branch is not None:
+            with cd(install_dir):
+                sudo('git checkout %s' % branch)
+
+
 @task(default=True)
 def install(install_dir='/vagrant', proxy=None):
     """Installs the latest mini-mon and bits necessary to run chef-solo and runs chef-solo on the box.
@@ -60,19 +79,10 @@ def install(install_dir='/vagrant', proxy=None):
     if proxy is not None:
         abort('Proxy support is incomplete.')
     execute(install_deps, proxy)
+    execute(git_mini_mon, install_dir, proxy)
 
     #Clone mini-mon
     with prefix(proxy_string(proxy)):
-        # Update the install dir if it already has code, otherwise check out
-        with settings(hide('running', 'output', 'warnings'), warn_only=True):
-            install_dir_check = run('ls %s' % install_dir)
-        if install_dir_check.succeeded:
-            with cd(install_dir):
-                sudo('git pull -f origin master')
-        else:
-            sudo('git clone https://github.com/hpcloud-mon/mon-vagrant.git %s' % install_dir)
-
         # download cookbooks
         with cd(install_dir):
             sudo('berks vendor')
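With the clone/update logic factored out above, git_mini_mon can also be run as a standalone task. A minimal invocation sketch, assuming the fabfile imports the module as baremetal (as utils/fabfile.py does) and using Fabric's colon-separated task arguments; the host name and install directory are placeholders:

    fab baremetal.git_mini_mon:install_dir=/opt/mini-mon,branch=feature/cluster -H node1.example.com

install() now reaches the same code through execute(git_mini_mon, install_dir, proxy) rather than running the git commands inline.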

28
utils/cluster/README.md Normal file
View File

@@ -0,0 +1,28 @@
# Setup of a test cluster
The goal of this fabric script is to set up a test cluster on bare metal, leveraging some tools from mini-mon.
## Steps
- Before running, first configure the following settings for your test cluster:
  - keystone host and project_id in data_bags/mon_agent/mon_agent.json
  - keystone host (serverVip) in data_bags/mon_api/mon_credentials.json
  - wsrep address in the Mon-Node role
  - servers in data_bags/zookeeper/mon.json
  - servers in data_bags/kafka/mon.json
  - vertica data bags in data_bags/vertica
    - ssh_key.json with two fields, public and private, containing the public and private SSH keys
    - nodes data bag
- From the utils directory (or by pointing fab at that fabfile with -f), start the install script
  - `fab cluster.setup -H host1,host2,host3`
- Create Kafka topics - the kafka::create_topics recipe can be run on one machine
- Set up the Vertica database schema - the vertica::create_db recipe and scripts can be used for reference but won't work for a cluster
  - The problems with the script are:
    - The create_db command needs all IPs specified for the -s arg, comma separated
    - The symbolic linking of the ssl cert/key needs to be done on each node
    - The restart policy should be set to ksafe rather than always
- Set up the mysql database schema - the mini-mon::mysql_schema recipe can be run on one machine
- Restart any services which require vertica, mysql or kafka

## Optional Configuration
- Add the Vertica Console to one of the machines. This can be done with the vertica::console recipe
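The ssh_key.json data bag referenced in the README above is deliberately not committed (it is ignored via the .gitignore change in this commit). A minimal sketch of its shape, assuming the two fields are literally named public and private as the README describes; the key material is a placeholder and the id is a guess that, per Chef data bag convention, should match the file name:

    {
      "id": "ssh_key",
      "public": "ssh-rsa AAAA... placeholder public key",
      "private": "-----BEGIN RSA PRIVATE KEY-----\n...placeholder...\n-----END RSA PRIVATE KEY-----\n"
    }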

78
utils/cluster/__init__.py Normal file
View File

@@ -0,0 +1,78 @@
#!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fabric tasks for installing a cluster monitoring stack on bare metal.
    These tasks were developed for hLinux but will likely work on any reasonably up-to-date Debian-based distro.
"""
from fabric.api import *
from fabric.tasks import Task

import os

from baremetal import chef_solo, git_mini_mon, install_deps

__all__ = ['setup']


class SetupCluster(Task):
    def __init__(self):
        """Setup a cluster running monitoring.
        """
        self.cluster_dir = '/var/tmp/chef-Mon-Node'
        self.cluster_hosts = None
        self.mini_mon_dir = '/vagrant'  # mini_mon_dir is /vagrant to match assumptions in mini-mon
        self.vertica_packages = ['vertica_7.0.1-0_amd64.deb', 'vertica-r-lang_7.0.1-0_amd64.deb']

    def run(self):
        """Installs the latest cookbooks and dependencies to run chef-solo and runs chef-solo on each box.
           The data bags in the cluster subdir should be properly set up for the environment before running.
        """
        self.cluster_hosts = env.hosts
        execute(install_deps)
        execute(git_mini_mon, self.mini_mon_dir, 'feature/cluster')

        # download cookbooks
        with settings(hide('running', 'output', 'warnings'), warn_only=True):
            sudo('rm -r %s' % self.cluster_dir)
        sudo('mkdir %s' % self.cluster_dir)
        with cd(self.mini_mon_dir):
            with settings(hide('running', 'output', 'warnings'), warn_only=True):
                berks_check = sudo('ls Berksfile.lock')
            if berks_check.succeeded:
                sudo('berks update')
            else:
                sudo('berks install')
            sudo('berks vendor %s/berks-cookbooks' % self.cluster_dir)

        # the vertica packages from my.vertica.com are needed; this assumes they are one level up from cwd
        for deb in self.vertica_packages:
            with settings(hide('running', 'output', 'warnings'), warn_only=True):
                if run('ls %s/%s' % (self.mini_mon_dir, deb)).failed:
                    puts('Uploading %s' % deb)
                    put('../../vertica*.deb', self.mini_mon_dir, use_sudo=True)

        # Copy roles and data bags - assumes you are running from the utils directory
        put('%s/cluster/data_bags' % os.path.dirname(env.real_fabfile), self.cluster_dir, use_sudo=True)
        put('%s/cluster/roles' % os.path.dirname(env.real_fabfile), self.cluster_dir, use_sudo=True)

        execute(chef_solo, self.cluster_dir, "role[Mon-Node]")


setup = SetupCluster()
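A usage sketch for the setup task defined above, following the assumptions noted in its comments: it is run from the utils directory, with the Vertica .deb packages available where the upload step expects them, against the cluster host names from the nodes data bag; the checkout location is a placeholder:

    cd mon-vagrant/utils
    fab cluster.setup -H mon-ae1test-kafka0001.useast.hpcloud.net,mon-ae1test-kafka0002.useast.hpcloud.net,mon-ae1test-kafka0003.useast.hpcloud.net

Cookbooks, roles and data bags end up under /var/tmp/chef-Mon-Node on each host before chef-solo is run with role[Mon-Node].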

View File

@@ -0,0 +1,8 @@
{
  "id" : "mon",
  "brokers": {
    "mon-ae1test-kafka0001.useast.hpcloud.net" : { "id": 0, "ip": "10.22.156.14" },
    "mon-ae1test-kafka0002.useast.hpcloud.net" : { "id": 1, "ip": "10.22.156.15" },
    "mon-ae1test-kafka0003.useast.hpcloud.net" : { "id": 2, "ip": "10.22.156.16" }
  }
}

View File

@@ -0,0 +1,13 @@
{
  "id": "mon_agent",
  "dimensions": "Environment:Dev",
  "log_level": "INFO",
  "api_key": "",
  "send_to_mon_api": "true",
  "api": {
    "mon_api_url": "http://localhost:8080/v2.0/metrics",
    "mon_api_project_id": "82510970543135",
    "use_keystone": "True",
    "keystone_url": "https://10.22.156.20:35357/v3/auth/tokens"
  }
}

View File

@@ -0,0 +1,17 @@
{
  "id" : "mon_api",
  "vertica" : {
    "dbname" : "mon",
    "hostname" : "localhost"
  },
  "zookeeper" : {
    "hostname" : "localhost"
  },
  "mysql": {
    "hostname":"localhost",
    "schema": "mon"
  },
  "kafka": {
    "hostname": "10.22.156.16"
  }
}

View File

@@ -0,0 +1,19 @@
{
  "id": "mon_credentials",
  "middleware": {
    "serverVip": "10.22.156.20",
    "truststore_password": "changeit"
  },
  "mysql": {
    "hostname": "localhost",
    "username": "monapi",
    "password": "password",
    "schema": "mon"
  },
  "vertica": {
    "hostname": "localhost",
    "username": "mon_api",
    "password": "password",
    "schema": "mon"
  }
}

View File

@@ -0,0 +1,7 @@
{
  "id" : "hosts",
  "kafka": "10.22.156.16",
  "mysql": "localhost",
  "smtp": "localhost",
  "zookeeper": "localhost"
}

View File

@@ -0,0 +1,7 @@
{
  "id" : "mon_credentials",
  "vertica" : {
    "user" : "dbadmin",
    "password" : "password"
  }
}

View File

@@ -0,0 +1,33 @@
{
  "id": "mon_persister",
  "kafka": {
    "metrics_topic": "metrics",
    "num_threads": "1",
    "group_id": "1",
    "consumer_id": {
      "mon-ae1test-kafka0001.useast.hpcloud.net": 0,
      "mon-ae1test-kafka0002.useast.hpcloud.net": 1,
      "mon-ae1test-kafka0003.useast.hpcloud.net": 2
    }
  },
  "disruptor": {
    "buffer_size": "1048576",
    "num_processors": "1"
  },
  "vertica_output_processor": {
    "batch_size": "10000"
  },
  "deduper_config": {
    "dedupe_run_frequencey_seconds": "30"
  },
  "vertica_metric_repository_config": {
    "max_cache_size": "2000000"
  },
  "vertica": {
    "dbname": "mon",
    "hostname": "localhost"
  },
  "zookeeper": {
    "hostname": "localhost"
  }
}

View File

@@ -0,0 +1,22 @@
{
  "id" : "mon_thresh",
  "kafka": {
    "metric": {
      "group": "thresh-metric",
      "topic": "metrics"
    },
    "event": {
      "group": "thresh-event",
      "host": "localhost:9092",
      "consumer_topic": "events",
      "producer_topic": "alarm-state-transitions"
    }
  },
  "mysql": {
    "db": "mon",
    "host": "localhost:3306"
  },
  "zookeeper": {
    "host": "localhost:2181"
  }
}

View File

@@ -0,0 +1,23 @@
{
  "id": "nodes",
  "nodes": {
    "mon-ae1test-kafka0001.useast.hpcloud.net" : {
      "ip": "10.22.156.14",
      "broadcast": "10.22.156.255",
      "network": "10.22.156.0",
      "netmask": "255.255.255.0"
    },
    "mon-ae1test-kafka0002.useast.hpcloud.net" : {
      "ip": "10.22.156.15",
      "broadcast": "10.22.156.255",
      "network": "10.22.156.0",
      "netmask": "255.255.255.0"
    },
    "mon-ae1test-kafka0003.useast.hpcloud.net" : {
      "ip": "10.22.156.16",
      "broadcast": "10.22.156.255",
      "network": "10.22.156.0",
      "netmask": "255.255.255.0"
    }
  }
}

View File

@@ -0,0 +1,8 @@
{
  "id" : "mon",
  "servers": {
    "mon-ae1test-kafka0001.useast.hpcloud.net" : { "id": 0, "ip": "10.22.156.14" },
    "mon-ae1test-kafka0002.useast.hpcloud.net" : { "id": 1, "ip": "10.22.156.15" },
    "mon-ae1test-kafka0003.useast.hpcloud.net" : { "id": 2, "ip": "10.22.156.16" }
  }
}

View File

@@ -0,0 +1,65 @@
{
  "name": "Mon-Node",
  "description": "Sets up one box in a Monitoring Cluster",
  "json_class": "Chef::Role",
  "default_attributes": {
    "apt": {
      "periodic_update_min_delay": 60
    },
    "kafka": {
      "cluster": "mon",
      "listen_interface": "eth2",
      "topics": {
        "metrics": { "replicas": 3, "partitions": 64 },
        "events": { "replicas": 3, "partitions": 12 },
        "alarm-state-transitions": { "replicas": 3, "partitions": 12 },
        "alarm-notifications": { "replicas": 3, "partitions": 12 }
      }
    },
    "percona": {
      "backup": {
        "password": "password"
      },
      "cluster": {
        "package": "percona-xtradb-cluster-56",
        "wsrep_cluster_address": "gcomm://10.22.156.14,10.22.156.15,10.22.156.16",
        "wsrep_cluster_name": "mon",
        "wsrep_sst_method": "rsync",
        "wsrep_provider": "/usr/lib/libgalera_smm.so"
      },
      "main_config_file": "/etc/mysql/my.cnf",
      "server": {
        "bind_address": "0.0.0.0",
        "replication": {
          "password": "password"
        },
        "role": "cluster",
        "root_password": "password",
        "skip_name_resolve": true
      }
    },
    "vertica": {
      "cluster": true
    },
    "zookeeper": {
      "cluster": "mon"
    }
  },
  "override_attributes": {
  },
  "chef_type": "role",
  "run_list": [
    "recipe[mini-mon]",
    "recipe[percona::cluster]",
    "recipe[zookeeper]",
    "recipe[kafka]",
    "recipe[mini-mon::postfix]",
    "recipe[mon_notification]",
    "recipe[vertica]",
    "recipe[sysctl]",
    "recipe[mon_api]",
    "recipe[mon_persister]"
  ],
  "env_run_lists": {
  }
}

1
utils/fabfile.py vendored
View File

@@ -1 +1,2 @@
 import baremetal
+import cluster