Merge pull request #236 from dshulyak/library_poc
Ceph-mon installation using fuel-library
This commit is contained in:
commit
2d56dc902f
24
examples/library_ceph/README.md
Normal file
24
examples/library_ceph/README.md
Normal file
@ -0,0 +1,24 @@
|
||||
Current example will do following things:
|
||||
|
||||
- fetch fuel-library from github
|
||||
- use ./update_modules.sh to fetch librarian dependencies
|
||||
- generate ceph keys on a solar-dev1
|
||||
- install ceph-mon on solar-dev1 (INPROGRESS)
|
||||
- install ceph-osd on solar-dev2 (TODO)
|
||||
- implement removal mechanism for ceph-mon/ceph-osd (TODO)
|
||||
|
||||
|
||||
To use it:
|
||||
|
||||
```
|
||||
python examples/library_ceph/ceph.py
|
||||
solar ch stage && solar ch process
|
||||
solar or run-once last -w 120
|
||||
```
|
||||
|
||||
If it fails, you can run a particular resource action with a lot of
|
||||
debug info.
|
||||
|
||||
```
|
||||
solar res action run ceph_mon1
|
||||
```
|
63
examples/library_ceph/ceph.py
Normal file
63
examples/library_ceph/ceph.py
Normal file
@ -0,0 +1,63 @@
|
||||
|
||||
from solar.core.resource import virtual_resource as vr
|
||||
from solar.interfaces.db import get_db
|
||||
|
||||
import yaml
|
||||
|
||||
db = get_db()

# Ceph storage options: enable Ceph object storage (radosgw), with a
# pool replication factor of 2 and 128 placement groups per pool.
STORAGE = {'objects_ceph': True,
           'osd_pool_size': 2,
           'pg_num': 128}

# Keystone credentials consumed by the ceph puppet class (rgw integration).
KEYSTONE = {'admin_token': 'abcde'}


# Maps ceph network roles onto host interfaces; both the replication and
# the public network share eth1 in this single-NIC example.
NETWORK_SCHEMA = {
    'endpoints': {'eth1': {'IP': ['10.0.0.3/24']}},
    'roles': {'ceph/replication': 'eth1',
              'ceph/public': 'eth1'}
}

# Inventory of ceph monitor nodes, keyed by hostname.
# safe_load instead of yaml.load: the bare yaml.load can construct
# arbitrary Python objects and requires an explicit Loader argument in
# PyYAML >= 6; safe_load is the correct call for plain data like this.
NETWORK_METADATA = yaml.safe_load("""
solar-dev1:
  uid: '1'
  fqdn: solar-dev1
  network_roles:
    ceph/public: 10.0.0.3
    ceph/replication: 10.0.0.3
  node_roles:
    - ceph-mon
  name: solar-dev1

""")
|
||||
|
||||
|
||||
def deploy():
    """Stage a single-node ceph-mon deployment.

    Wipes the solar database, creates one node from the nodes template,
    attaches the fuel-library fetcher, the ceph key generator and the
    ceph-mon resource to it, and wires their inputs together.
    """
    db.clear()

    created = vr.create('nodes', 'templates/nodes.yaml', {'count': 1})
    # The template also produces transport/helper resources; pick the node.
    node = next(r for r in created if r.name.startswith('node'))

    fuel_lib = vr.create('library1', 'resources/fuel_library', {})[0]
    node.connect(fuel_lib)

    key_res = vr.create('ceph_key', 'resources/ceph_keys', {})[0]
    node.connect(key_res)

    mon_args = {
        'storage': STORAGE,
        'keystone': KEYSTONE,
        'network_scheme': NETWORK_SCHEMA,
        'ceph_monitor_nodes': NETWORK_METADATA,
        'ceph_primary_monitor_node': NETWORK_METADATA,
        'role': 'controller',
    }
    mon = vr.create('ceph_mon1', 'resources/ceph_mon', mon_args)[0]

    # Keys must exist before the monitor is configured.
    key_res.connect(mon, {})
    # Reuse the node IP for both VIPs in this single-node example.
    node.connect(mon, {'ip': ['ip', 'public_vip', 'management_vip']})
    fuel_lib.connect(mon, {'puppet_modules': 'puppet_modules'})


if __name__ == '__main__':
    deploy()
|
17
resources/ceph_keys/actions/run.sh
Normal file
17
resources/ceph_keys/actions/run.sh
Normal file
@ -0,0 +1,17 @@
|
||||
#!/bin/sh
# Generate a password-less 2048-bit RSA key pair for ceph under
# {{ target_directory }}{{ key_name }}/ unless one already exists.
# The {{ ... }} placeholders are rendered by solar before execution.

BASE_PATH={{ target_directory }}
KEY_NAME={{ key_name }}

# POSIX sh has no 'function' keyword; use the portable name() form so the
# script actually runs under /bin/sh (dash, busybox ash, ...).
generate_ssh_keys() {
    dir_path="$BASE_PATH$KEY_NAME/"
    key_path="$dir_path$KEY_NAME"
    mkdir -p "$dir_path"
    if [ ! -f "$key_path" ]; then
        ssh-keygen -b 2048 -t rsa -N '' -f "$key_path" 2>&1
    else
        # Double quotes so $key_path is expanded; the original
        # single-quoted string printed the literal text '$key_path'.
        echo "Key $key_path already exists"
    fi
}

generate_ssh_keys
|
14
resources/ceph_keys/meta.yaml
Normal file
14
resources/ceph_keys/meta.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
# Resource definition for ceph key generation, executed via the shell
# handler (actions/run.sh).
id: ceph_keys
handler: shell
version: 1.0.0
input:
  # Address of the node the script runs on; filled in by connecting a node.
  ip:
    schema: str!
    value:
  # Directory the key pair is written under.
  target_directory:
    schema: str!
    value: /var/lib/astute/
  # Subdirectory and file name for the generated key pair.
  key_name:
    schema: str!
    value: ceph
tags: []
|
95
resources/ceph_mon/actions/run.pp
Normal file
95
resources/ceph_mon/actions/run.pp
Normal file
@ -0,0 +1,95 @@
|
||||
# Install and configure a ceph monitor via the fuel-library 'ceph' class.
# Input values come from hiera, populated by solar from the resource
# inputs declared in meta.yaml.
notice('MODULAR: ceph/mon.pp')


$storage_hash = hiera('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera('keystone', {})
# Map of monitor hostname -> its ceph/public IP address.
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')

# Pick the glance backend matching the image-storage choice.
if ($storage_hash['images_ceph']) {
  $glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
  $glance_backend = 'vmware'
} else {
  $glance_backend = 'swift'
}

# Ceph is deployed when any consumer (volumes, images, objects or
# ephemeral disks) is backed by it.
if ($storage_hash['volumes_ceph'] or
  $storage_hash['images_ceph'] or
  $storage_hash['objects_ceph'] or
  $storage_hash['ephemeral_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

if $use_ceph {
  # The primary monitor bootstraps the cluster; take the 'name' of the
  # first entry of the ceph_primary_monitor_node hash.
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons = keys($ceph_primary_monitor_node)
  $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']

  # Resolve the cluster/public networks and this node's monitor address
  # from the network scheme.
  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network = get_network_role_property('ceph/public', 'network')
  $mon_addr = get_network_role_property('ceph/public', 'ipaddr')

  class {'ceph':
    primary_mon              => $primary_mon,
    mon_hosts                => keys($mon_address_map),
    mon_ip_addresses         => values($mon_address_map),
    mon_addr                 => $mon_addr,
    cluster_node_address     => $public_vip,
    osd_pool_default_size    => $storage_hash['osd_pool_size'],
    osd_pool_default_pg_num  => $storage_hash['pg_num'],
    osd_pool_default_pgp_num => $storage_hash['pg_num'],
    use_rgw                  => false,
    glance_backend           => $glance_backend,
    rgw_pub_ip               => $public_vip,
    rgw_adm_ip               => $management_vip,
    rgw_int_ip               => $management_vip,
    cluster_network          => $ceph_cluster_network,
    public_network           => $ceph_public_network,
    use_syslog               => $use_syslog,
    syslog_log_level         => hiera('syslog_log_level_ceph', 'info'),
    syslog_log_facility      => $syslog_log_facility_ceph,
    rgw_keystone_admin_token => $keystone_hash['admin_token'],
    ephemeral_ceph           => $storage_hash['ephemeral_ceph']
  }

  # Restart cinder services after ceph configuration so they pick up
  # the rbd backend.
  if ($storage_hash['volumes_ceph']) {
    include ::cinder::params
    service { 'cinder-volume':
      ensure     => 'running',
      name       => $::cinder::params::volume_service,
      hasstatus  => true,
      hasrestart => true,
    }

    service { 'cinder-backup':
      ensure     => 'running',
      name       => $::cinder::params::backup_service,
      hasstatus  => true,
      hasrestart => true,
    }

    Class['ceph'] ~> Service['cinder-volume']
    Class['ceph'] ~> Service['cinder-backup']
  }

  # Likewise restart glance-api when images live in ceph.
  if ($storage_hash['images_ceph']) {
    include ::glance::params
    service { 'glance-api':
      ensure     => 'running',
      name       => $::glance::params::api_service_name,
      hasstatus  => true,
      hasrestart => true,
    }

    Class['ceph'] ~> Service['glance-api']
  }

}
|
4
resources/ceph_mon/actions/test.pp
Normal file
4
resources/ceph_mon/actions/test.pp
Normal file
@ -0,0 +1,4 @@
|
||||
# Smoke test: resolve the ceph/replication network from the network
# scheme and print it, proving hiera data and network functions work.
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')

notify{"The value is: ${ceph_cluster_network}": }
|
38
resources/ceph_mon/meta.yaml
Normal file
38
resources/ceph_mon/meta.yaml
Normal file
@ -0,0 +1,38 @@
|
||||
# Resource definition for ceph-mon installation, applied with the
# puppetv2 handler (inputs are exposed to the manifest via hiera).
id: ceph_mon
handler: puppetv2
version: 1.0.0
input:
  # Address of the node puppet runs on; filled in by connecting a node.
  ip:
    schema: str!
    value:
  public_vip:
    schema: str!
    value:
  management_vip:
    schema: str!
    value:
  use_syslog:
    schema: bool
    value: true
  # Keystone credentials (admin_token) for radosgw integration.
  keystone:
    schema: {'admin_token': 'str'}
    value: {}
  # Monitor-node inventories; structure mirrors fuel astute.yaml data.
  ceph_monitor_nodes:
    schema: []
    value: []
  ceph_primary_monitor_node:
    schema: []
    value: []
  # Storage feature flags and pool sizing (objects_ceph, pg_num, ...).
  storage:
    schema: {}
    value: {}
  # Network endpoints and role mapping consumed by prepare_network_config.
  network_scheme:
    schema: {}
    value: {}
  role:
    schema: str!
    value:
  # Module path on the target node, fed from the fuel_library resource.
  puppet_modules:
    schema: str!
    value:
tags: []
|
20
resources/fuel_library/actions/run.sh
Normal file
20
resources/fuel_library/actions/run.sh
Normal file
@ -0,0 +1,20 @@
|
||||
#!/bin/bash
# Fetch (or update) fuel-library, resolve its librarian module
# dependencies and copy the puppet modules to {{puppet_modules}}.
# The {{ ... }} placeholders are rendered by solar before execution.

# Abort on the first failing command: without this a failed clone/pull or
# a failed update_modules.sh would still copy a stale or partial module
# tree and the resource would report success.
set -e

mkdir -p {{temp_directory}}

pushd {{temp_directory}}
if [ ! -d fuel-library ]
then
    git clone -b {{ git['branch'] }} {{ git['repository'] }}
else
    pushd ./fuel-library
    git pull
    popd
fi
# Fetch librarian-managed third-party modules.
pushd ./fuel-library/deployment
./update_modules.sh
popd

mkdir -p {{puppet_modules}}
cp -r ./fuel-library/deployment/puppet/* {{puppet_modules}}
popd
|
18
resources/fuel_library/meta.yaml
Normal file
18
resources/fuel_library/meta.yaml
Normal file
@ -0,0 +1,18 @@
|
||||
# Resource definition for fetching fuel-library (shell handler,
# actions/run.sh).
id: fuel_library
handler: shell
version: 1.0.0
input:
  # Address of the node the script runs on; filled in by connecting a node.
  ip:
    schema: str!
    value:
  # Git source of fuel-library; branch stable/7.0 by default.
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/stackforge/fuel-library',
            branch: 'stable/7.0'}
  # Scratch directory the repository is cloned into.
  temp_directory:
    schema: str!
    value: /tmp/solar
  # Destination for the assembled puppet modules.
  puppet_modules:
    schema: str!
    value: /etc/fuel/modules
tags: []
|
@ -16,7 +16,7 @@
|
||||
from solar.core.handlers.ansible_template import AnsibleTemplate
|
||||
from solar.core.handlers.ansible_playbook import AnsiblePlaybook
|
||||
from solar.core.handlers.base import Empty
|
||||
from solar.core.handlers.puppet import Puppet
|
||||
from solar.core.handlers.puppet import Puppet, PuppetV2
|
||||
from solar.core.handlers.shell import Shell
|
||||
|
||||
|
||||
@ -24,7 +24,8 @@ HANDLERS = {'ansible': AnsibleTemplate,
|
||||
'ansible_playbook': AnsiblePlaybook,
|
||||
'shell': Shell,
|
||||
'puppet': Puppet,
|
||||
'none': Empty}
|
||||
'none': Empty,
|
||||
'puppetv2': PuppetV2}
|
||||
|
||||
def get(handler_name):
|
||||
handler = HANDLERS.get(handler_name, None)
|
||||
|
@ -103,13 +103,21 @@ class Puppet(TempFileHandler):
|
||||
|
||||
self.upload_manifests(resource)
|
||||
|
||||
action_file_name = '/tmp/{}.pp'.format(resource.name)
|
||||
self.prepare_templates_and_scripts(resource, action_file, '')
|
||||
self.transport_sync.copy(resource, action_file, '/tmp/action.pp')
|
||||
self.transport_sync.copy(resource, action_file, action_file_name)
|
||||
self.transport_sync.sync_all()
|
||||
|
||||
cmd_args = ['puppet', 'apply', '-vd',
|
||||
action_file_name,
|
||||
'--detailed-exitcodes']
|
||||
if 'puppet_modules' in resource.args:
|
||||
cmd_args.append('--modulepath={}'.format(
|
||||
resource.args['puppet_modules']))
|
||||
|
||||
cmd = self.transport_run.run(
|
||||
resource,
|
||||
'puppet', 'apply', '-vd', '/tmp/action.pp', '--detailed-exitcodes',
|
||||
*cmd_args,
|
||||
env={
|
||||
'FACTER_resource_name': resource.name,
|
||||
},
|
||||
@ -129,11 +137,12 @@ class Puppet(TempFileHandler):
|
||||
|
||||
return p.directory
|
||||
|
||||
def _make_args(self, resource):
|
||||
return {resource.name: resource.to_dict()}
|
||||
|
||||
def upload_hiera_resource(self, resource):
|
||||
with open('/tmp/puppet_resource.yaml', 'w') as f:
|
||||
f.write(yaml.dump({
|
||||
resource.name: resource.to_dict()
|
||||
}))
|
||||
f.write(yaml.safe_dump(self._make_args(resource)))
|
||||
|
||||
self.transport_sync.copy(
|
||||
resource,
|
||||
@ -146,7 +155,7 @@ class Puppet(TempFileHandler):
|
||||
def upload_manifests(self, resource):
|
||||
if 'forge' in resource.args and resource.args['forge']:
|
||||
self.upload_manifests_forge(resource)
|
||||
else:
|
||||
elif 'git' in resource.args and resource.args['git']:
|
||||
self.upload_manifests_librarian(resource)
|
||||
|
||||
def upload_manifests_forge(self, resource):
|
||||
@ -201,3 +210,10 @@ class Puppet(TempFileHandler):
|
||||
'/tmp/{}/*'.format(os.path.split(manifests_path)[1]),
|
||||
module_directory
|
||||
)
|
||||
|
||||
|
||||
class PuppetV2(Puppet):
    """Puppet handler variant that exposes resource args to hiera as-is.

    The base ``Puppet`` handler nests the serialized resource under the
    resource name; this variant puts the args at the top level, which is
    the layout fuel-library manifests read with ``hiera(...)``.
    """

    def _make_args(self, resource):
        # Top-level args dict, not {resource.name: {...}} as in Puppet.
        return resource.args
|
||||
|
||||
|
@ -13,7 +13,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from fabric import api as fabric_api
|
||||
from solar.core.log import log
|
||||
from solar import errors
|
||||
|
||||
from solar.core.handlers.base import TempFileHandler
|
||||
|
||||
@ -21,4 +22,20 @@ from solar.core.handlers.base import TempFileHandler
|
||||
class Shell(TempFileHandler):
|
||||
def action(self, resource, action_name):
|
||||
action_file = self._compile_action_file(resource, action_name)
|
||||
fabric_api.local('bash {}'.format(action_file))
|
||||
log.debug('action_file: %s', action_file)
|
||||
|
||||
action_file_name = '/tmp/{}.sh'.format(resource.name)
|
||||
self.transport_sync.copy(resource, action_file, action_file_name)
|
||||
self.transport_sync.sync_all()
|
||||
cmd = self.transport_run.run(
|
||||
resource,
|
||||
'bash', action_file_name,
|
||||
use_sudo=True,
|
||||
warn_only=True
|
||||
)
|
||||
|
||||
if cmd.return_code:
|
||||
raise errors.SolarError(
|
||||
'Bash execution for {} failed with {}'.format(
|
||||
resource.name, cmd.return_code))
|
||||
return cmd
|
||||
|
@ -44,7 +44,7 @@ class GitProvider(BaseProvider):
|
||||
super(GitProvider, self).__init__(*args, **kwargs)
|
||||
|
||||
self.repository = repository
|
||||
self.branch = 'master'
|
||||
self.branch = branch
|
||||
self.path = path
|
||||
|
||||
directory = self._directory()
|
||||
|
Loading…
x
Reference in New Issue
Block a user