commit df6c42aedbcdddcd178bc1efd6862b6f2eb01826
Author: Alessandro Pilotti
Date:   Sat Dec 1 23:50:15 2012 +0200

    Initial commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..51ab4183
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,37 @@
+*.DS_Store
+*.egg*
+*.log
+*.mo
+*.pyc
+*.swo
+*.swp
+*.sqlite
+.autogenerated
+.coverage
+.nova-venv
+.project
+.pydevproject
+.ropeproject
+.tox
+.venv
+AUTHORS
+Authors
+build-stamp
+build/*
+CA/
+ChangeLog
+coverage.xml
+cover/*
+covhtml
+dist/*
+doc/source/api/*
+doc/build/*
+instances
+keeper
+keys
+local_settings.py
+MANIFEST
+nosetests.xml
+nova/tests/cover/*
+nova/vcsversion.py
+tools/conf/nova.conf*
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..14e936c8
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,89 @@
+Portable OpenStack Cloud Initialization Service
+===============================================
+
+The main goal of this project is to bring the benefits of cloud-init to guests running a wide range of operating systems.
+The first release provides support for Windows, but thanks to the modular and decoupled architecture of the service, plugins for any OS can easily be added.
+
+The features available with the first release include HTTP and ConfigDriveV2 metadata services and plugins for:
+hostname, user creation, group membership, static networking, SSH user public keys, and user_data custom scripts running in various shells (cmd.exe / PowerShell / bash).
+
+There is no limitation on the type of supported hypervisor: this service can be used on instances running on Hyper-V, KVM, Xen, ESXi, etc.
+
+Documentation, support and contacts: http://www.cloudbase.it
+
+
+Metadata services
+-----------------
+
+A metadata service has the role of retrieving the metadata configuration information.
+ConfigDriveV2 and HTTP are supported out of the box, but other sources can easily be added.
+
+
+Plugins
+-------
+
+Plugins execute actions based on the metadata obtained by the service.
+
+The following plugins are currently implemented for Windows:
+
+
+cloudbaseinit.plugins.windows.sethostname.SetHostNamePlugin
+
+Sets the instance's hostname.
+
+
+cloudbaseinit.plugins.windows.createuser.CreateUserPlugin
+
+Creates or updates a user, setting the password provided in the metadata (admin_pass) if available.
+The user is then added to a set of provided local groups.
+The following configuration parameters control the behaviour of this plugin:
+
+username
+Default: Admin
+
+groups
+Comma separated list of groups. Default: Administrators
+
+inject_user_password
+Can be set to false to avoid injecting the password provided in the metadata. Default: True
+
+
+cloudbaseinit.plugins.windows.networkconfig.NetworkConfigPlugin
+
+Configures static networking.
+
+network_adapter
+Network adapter to configure. If not specified, the first available Ethernet adapter will be chosen. Default: None
+
+
+cloudbaseinit.plugins.windows.sshpublickeys.SetUserSSHPublicKeysPlugin
+
+Creates an "authorized_keys" file in the user's home directory containing the SSH keys provided in the metadata.
+Note: on Windows an SSH service needs to be installed to take advantage of this feature.
+
+
+cloudbaseinit.plugins.windows.userdata.UserDataPlugin
+
+Executes custom scripts provided with the user_data metadata as plain text or compressed with Gzip.
+
+Supported formats:
+
+Windows batch
+
+The file is executed in a cmd.exe shell (this can be changed with the COMSPEC environment variable).
+The first line of the user_data must be:
+rem cmd
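+
+A minimal example (for illustration only; the output file name is arbitrary):
+
+rem cmd
+echo user_data executed > C:\cloudbase-init-userdata.log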
+
+
+PowerShell
+
+Script execution is automatically enabled (RemoteSigned) if not already set.
+The first line of the user_data must be:
+#ps1
+
+Bash
+
+A bash shell needs to be installed on the system and available in the PATH in order to use this feature.
+The first line of the user_data must start with:
+#!
diff --git a/cloudbaseinit/__init__.py b/cloudbaseinit/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/init.py b/cloudbaseinit/init.py
new file mode 100644
index 00000000..7644e71f
--- /dev/null
+++ b/cloudbaseinit/init.py
@@ -0,0 +1,57 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from cloudbaseinit.metadata.factory import *
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.openstack.common import log as logging
+from cloudbaseinit.osutils.factory import *
+from cloudbaseinit.plugins.factory import *
+from cloudbaseinit.utils import *
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class InitManager(object):
+    def configure_host(self):
+        osutils = OSUtilsFactory().get_os_utils()
+        plugins = PluginFactory().load_plugins()
+        service = MetadataServiceFactory().get_metadata_service()
+        LOG.info('Metadata service loaded: \'%s\'' %
+                 service.__class__.__name__)
+
+        reboot_required = False
+        try:
+            for plugin in plugins:
+                plugin_name = plugin.__class__.__name__
+                LOG.info('Executing plugin \'%(plugin_name)s\'' % locals())
+                try:
+                    plugin_requires_reboot = plugin.execute(service)
+                    if plugin_requires_reboot:
+                        reboot_required = True
+                except Exception as ex:
+                    LOG.error('plugin \'%(plugin_name)s\' failed '
+                              'with error \'%(ex)s\'' % locals())
+        finally:
+            service.cleanup()
+
+        if reboot_required:
+            try:
+                osutils.reboot()
+            except Exception as ex:
+                LOG.error('reboot failed with error \'%s\'' % ex)
diff --git a/cloudbaseinit/metadata/__init__.py b/cloudbaseinit/metadata/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/metadata/factory.py b/cloudbaseinit/metadata/factory.py
new file mode 100644
index 00000000..ef8cc595
--- /dev/null
+++ b/cloudbaseinit/metadata/factory.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
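+
+# A hypothetical configuration file entry overriding the default list of
+# metadata services registered below (the file name and location depend on
+# how cfg.CONF is invoked; class paths are tried in the given order):
+#
+#     [DEFAULT]
+#     metadata_services = cloudbaseinit.metadata.services.configdrive.configdrive.ConfigDriveService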
+
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.utils import *
+
+opts = [
+    cfg.ListOpt('metadata_services', default=[
+        'cloudbaseinit.metadata.services.configdrive.configdrive.'
+        'ConfigDriveService'
+        ],
+        help='List of enabled metadata service classes, '
+        'to be tested for availability in the provided order. '
+        'The first available service will be used to retrieve metadata')
+    ]
+
+CONF = cfg.CONF
+CONF.register_opts(opts)
+
+
+class MetadataServiceFactory(object):
+    def get_metadata_service(self):
+        # Return the first service that loads correctly
+        utils = Utils()
+        for class_path in CONF.metadata_services:
+            service = utils.load_class(class_path)()
+            if service.load():
+                return service
+        raise Exception("No available metadata service found")
diff --git a/cloudbaseinit/metadata/services/__init__.py b/cloudbaseinit/metadata/services/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/metadata/services/base.py b/cloudbaseinit/metadata/services/base.py
new file mode 100644
index 00000000..7a3dda54
--- /dev/null
+++ b/cloudbaseinit/metadata/services/base.py
@@ -0,0 +1,57 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import logging
+import os
+import posixpath
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseMetadataService(object):
+    def load(self):
+        self._cache = {}
+
+    def _get_data(self, path):
+        # To be implemented by subclasses
+        pass
+
+    def _get_cache_data(self, path):
+        if path in self._cache:
+            LOG.debug('Using cached copy of metadata: \'%s\'' % path)
+            return self._cache[path]
+        else:
+            data = self._get_data(path)
+            self._cache[path] = data
+            return data
+
+    def get_content(self, data_type, name):
+        path = posixpath.normpath(
+            posixpath.join(data_type, 'content', name))
+        return self._get_cache_data(path)
+
+    def get_user_data(self, data_type, version='latest'):
+        path = posixpath.normpath(
+            posixpath.join(data_type, version, 'user_data'))
+        return self._get_cache_data(path)
+
+    def get_meta_data(self, data_type, version='latest'):
+        path = posixpath.normpath(
+            posixpath.join(data_type, version, 'meta_data.json'))
+        return json.loads(self._get_cache_data(path))
+
+    def cleanup(self):
+        pass
diff --git a/cloudbaseinit/metadata/services/configdrive/__init__.py b/cloudbaseinit/metadata/services/configdrive/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/metadata/services/configdrive/configdrive.py b/cloudbaseinit/metadata/services/configdrive/configdrive.py
new file mode 100644
index 00000000..8abddde7
--- /dev/null
+++ b/cloudbaseinit/metadata/services/configdrive/configdrive.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import logging
+import os
+import shutil
+import tempfile
+import uuid
+
+from cloudbaseinit.metadata.services.base import *
+from cloudbaseinit.openstack.common import cfg
+from manager import *
+
+opts = [
+    cfg.BoolOpt('config_drive_raw_hdd', default=True,
+                help='Look for an ISO config drive in raw HDDs'),
+    cfg.BoolOpt('config_drive_cdrom', default=True,
+                help='Look for a config drive in the attached cdrom drives'),
+    ]
+
+CONF = cfg.CONF
+CONF.register_opts(opts)
+
+LOG = logging.getLogger(__name__)
+
+
+class ConfigDriveService(BaseMetadataService):
+    def __init__(self):
+        self._metadata_path = None
+
+    def load(self):
+        super(ConfigDriveService, self).load()
+
+        target_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
+
+        mgr = ConfigDriveManager()
+        found = mgr.get_config_drive_files(target_path,
+                                           CONF.config_drive_raw_hdd,
+                                           CONF.config_drive_cdrom)
+        if found:
+            self._metadata_path = target_path
+            LOG.debug('Metadata copied to folder: \'%s\'' %
+                      self._metadata_path)
+        return found
+
+    def _get_data(self, path):
+        norm_path = os.path.normpath(os.path.join(self._metadata_path, path))
+        with open(norm_path, 'rb') as f:
+            return f.read()
+
+    def cleanup(self):
+        if self._metadata_path:
+            LOG.debug('Deleting metadata folder: \'%s\'' %
+                      self._metadata_path)
+            shutil.rmtree(self._metadata_path, True)
+            self._metadata_path = None
diff --git a/cloudbaseinit/metadata/services/configdrive/manager.py b/cloudbaseinit/metadata/services/configdrive/manager.py
new file mode 100644
index 00000000..0e1e9dfe
--- /dev/null
+++ b/cloudbaseinit/metadata/services/configdrive/manager.py
@@ -0,0 +1,189 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
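+
+# Config drive discovery strategy (see get_config_drive_files below):
+# 1. Scan the raw physical disks for an embedded ISO9660 image (a config
+#    drive attached as a raw disk), extract it to a temporary .iso file
+#    and mount it as a read-only virtual disk (Windows 8 / 2012 or later).
+# 2. Otherwise, look for an attached cdrom drive whose volume label is
+#    "config-2".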
+
+import ctypes
+import logging
+import os
+import shutil
+import sys
+import tempfile
+import uuid
+import wmi
+
+from ctypes import wintypes
+
+from windows.disk.physical_disk import *
+from windows.disk.virtual_disk import *
+
+LOG = logging.getLogger(__name__)
+
+
+class ConfigDriveManager(object):
+    def _get_physical_disks_path(self):
+        paths = []
+        conn = wmi.WMI(moniker='//./root/cimv2')
+        q = conn.query('SELECT DeviceID FROM Win32_DiskDrive')
+        for r in q:
+            paths.append(r.DeviceID)
+        return paths
+
+    def _get_config_drive_cdrom_mount_point(self):
+        conn = wmi.WMI(moniker='//./root/cimv2')
+        q = conn.query('SELECT Drive FROM Win32_CDROMDrive '
+                       'WHERE MediaLoaded = True')
+        for r in q:
+            drive = r.Drive + '\\'
+            q1 = conn.query('SELECT Label FROM Win32_Volume '
+                            'WHERE Name = \'%(drive)s\'' % locals())
+            for r1 in q1:
+                if r1.Label == "config-2" and \
+                    os.path.exists(os.path.join(
+                        drive, 'openstack\\latest\\meta_data.json')):
+                    return drive
+        return None
+
+    def _c_char_array_to_c_ushort(self, buf, offset):
+        # Read two consecutive bytes as a little-endian unsigned short
+        return (ord(buf[offset + 1]) << 8) + ord(buf[offset])
+
+    def _get_iso_disk_size(self, phys_disk):
+        geom = phys_disk.get_geometry()
+
+        if geom.MediaType != Win32_DiskGeometry.FixedMedia:
+            return None
+
+        disk_size = geom.Cylinders * geom.TracksPerCylinder * \
+            geom.SectorsPerTrack * geom.BytesPerSector
+
+        # ISO9660 volume descriptor offsets: the descriptor starts at
+        # 0x8000, with the "CD001" identifier at offset 1, the volume
+        # space size at offset 80 and the logical block size at offset
+        # 128. Only the low 16 bits of the volume space size are read,
+        # which is sufficient for small config drive images.
+        boot_record_off = 0x8000
+        id_off = 1
+        volume_size_off = 80
+        block_size_off = 128
+        iso_id = 'CD001'
+
+        offset = boot_record_off / geom.BytesPerSector * geom.BytesPerSector
+        bytes_to_read = geom.BytesPerSector
+
+        if disk_size <= offset + bytes_to_read:
+            return None
+
+        phys_disk.seek(offset)
+        (buf, bytes_read) = phys_disk.read(bytes_to_read)
+
+        buf_off = boot_record_off - offset + id_off
+        if iso_id != buf[buf_off:buf_off + len(iso_id)]:
+            return None
+
+        buf_off = boot_record_off - offset + volume_size_off
+        num_blocks = self._c_char_array_to_c_ushort(buf, buf_off)
+
+        buf_off = boot_record_off - offset + block_size_off
+        block_size = self._c_char_array_to_c_ushort(buf, buf_off)
+
+        return num_blocks * block_size
+
+    def _write_iso_file(self, phys_disk, path, iso_file_size):
+        with open(path, 'wb') as f:
+            geom = phys_disk.get_geometry()
+            disk_size = geom.Cylinders * geom.TracksPerCylinder * \
+                geom.SectorsPerTrack * geom.BytesPerSector
+
+            offset = 0
+            # Get a multiple of the sector byte size
+            bytes_to_read = 4096 / geom.BytesPerSector * geom.BytesPerSector
+
+            while offset < iso_file_size:
+                phys_disk.seek(offset)
+                bytes_to_read = min(bytes_to_read, iso_file_size - offset)
+                (buf, bytes_read) = phys_disk.read(bytes_to_read)
+                f.write(buf)
+                offset += bytes_read
+
+    def _copy_iso_files(self, iso_file_path, target_path):
+        virt_disk = VirtualDisk(iso_file_path)
+        virt_disk.open()
+        try:
+            virt_disk.attach()
+            cdrom_mount_point = virt_disk.get_cdrom_drive_mount_point()
+            shutil.copytree(cdrom_mount_point, target_path)
+        finally:
+            try:
+                virt_disk.detach()
+            except Exception:
+                pass
+            virt_disk.close()
+
+    def _extract_iso_disk_file(self, iso_file_path):
+        iso_disk_found = False
+        for path in self._get_physical_disks_path():
+            phys_disk = PhysicalDisk(path)
+            try:
+                phys_disk.open()
+                iso_file_size = self._get_iso_disk_size(phys_disk)
+                if iso_file_size:
+                    self._write_iso_file(phys_disk, iso_file_path,
+                                         iso_file_size)
+                    iso_disk_found = True
+                    break
+            except Exception:
+                # Not an ISO disk, continue with the next one
+                pass
+            finally:
+                phys_disk.close()
+        return iso_disk_found
+
+    def _os_supports_iso_virtual_disks(self):
+        # Feature supported starting from Windows 8 / Windows Server 2012
+        ver = sys.getwindowsversion()
+        supported = (ver[0], ver[1]) >= (6, 2)
+        if not supported:
+            LOG.debug('ISO virtual disks are not supported on '
+                      'this version of Windows')
+        return supported
+
+    def get_config_drive_files(self, target_path, check_raw_hdd=True,
+                               check_cdrom=True):
+        config_drive_found = False
+        if check_raw_hdd and self._os_supports_iso_virtual_disks():
+            LOG.debug('Looking for Config Drive in raw HDDs')
+            config_drive_found = self._get_conf_drive_from_raw_hdd(
+                target_path)
+
+        if not config_drive_found and check_cdrom:
+            LOG.debug('Looking for Config Drive in cdrom drives')
+            config_drive_found = self._get_conf_drive_from_cdrom_drive(
+                target_path)
+        return config_drive_found
+
+    def _get_conf_drive_from_cdrom_drive(self, target_path):
+        cdrom_mount_point = self._get_config_drive_cdrom_mount_point()
+        if cdrom_mount_point:
+            shutil.copytree(cdrom_mount_point, target_path)
+            return True
+        return False
+
+    def _get_conf_drive_from_raw_hdd(self, target_path):
+        config_drive_found = False
+        iso_file_path = os.path.join(tempfile.gettempdir(),
+                                     str(uuid.uuid4()) + '.iso')
+        try:
+            if self._extract_iso_disk_file(iso_file_path):
+                self._copy_iso_files(iso_file_path, target_path)
+                config_drive_found = True
+        finally:
+            if os.path.exists(iso_file_path):
+                os.remove(iso_file_path)
+        return config_drive_found
diff --git a/cloudbaseinit/metadata/services/configdrive/windows/__init__.py b/cloudbaseinit/metadata/services/configdrive/windows/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/metadata/services/configdrive/windows/disk/__init__.py b/cloudbaseinit/metadata/services/configdrive/windows/disk/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/metadata/services/configdrive/windows/disk/physical_disk.py b/cloudbaseinit/metadata/services/configdrive/windows/disk/physical_disk.py
new file mode 100644
index 00000000..3e424861
--- /dev/null
+++ b/cloudbaseinit/metadata/services/configdrive/windows/disk/physical_disk.py
@@ -0,0 +1,107 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
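+
+# Typical usage of the PhysicalDisk class defined below (a sketch, not
+# executed in this module; the device path is an example of a
+# Win32_DiskDrive DeviceID):
+#
+#     disk = PhysicalDisk(r'\\.\PHYSICALDRIVE1')
+#     disk.open()
+#     geom = disk.get_geometry()
+#     disk.seek(0x8000)
+#     (buf, bytes_read) = disk.read(geom.BytesPerSector)
+#     disk.close()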
+
+import ctypes
+
+from ctypes import windll
+from ctypes import wintypes
+
+kernel32 = windll.kernel32
+
+
+class Win32_DiskGeometry(ctypes.Structure):
+    FixedMedia = 12
+
+    _fields_ = [
+        ('Cylinders', wintypes.LARGE_INTEGER),
+        ('MediaType', wintypes.DWORD),
+        ('TracksPerCylinder', wintypes.DWORD),
+        ('SectorsPerTrack', wintypes.DWORD),
+        ('BytesPerSector', wintypes.DWORD),
+    ]
+
+
+class PhysicalDisk(object):
+    GENERIC_READ = 0x80000000
+    FILE_SHARE_READ = 1
+    OPEN_EXISTING = 3
+    FILE_ATTRIBUTE_READONLY = 1
+    INVALID_HANDLE_VALUE = -1
+    IOCTL_DISK_GET_DRIVE_GEOMETRY = 0x70000
+    FILE_BEGIN = 0
+    INVALID_SET_FILE_POINTER = 0xFFFFFFFFL
+
+    def __init__(self, path):
+        self._path = path
+        self._handle = 0
+        self._geom = None
+
+    def open(self):
+        if self._handle:
+            self.close()
+
+        handle = kernel32.CreateFileW(
+            ctypes.c_wchar_p(self._path),
+            self.GENERIC_READ,
+            self.FILE_SHARE_READ,
+            0,
+            self.OPEN_EXISTING,
+            self.FILE_ATTRIBUTE_READONLY,
+            0)
+        if handle == self.INVALID_HANDLE_VALUE:
+            raise Exception('Cannot open file')
+        self._handle = handle
+
+    def close(self):
+        kernel32.CloseHandle(self._handle)
+        self._handle = 0
+        self._geom = None
+
+    def get_geometry(self):
+        if not self._geom:
+            geom = Win32_DiskGeometry()
+            bytes_returned = wintypes.DWORD()
+            ret_val = kernel32.DeviceIoControl(
+                self._handle,
+                self.IOCTL_DISK_GET_DRIVE_GEOMETRY,
+                0,
+                0,
+                ctypes.byref(geom),
+                ctypes.sizeof(geom),
+                ctypes.byref(bytes_returned),
+                0)
+            if not ret_val:
+                raise Exception("Cannot get disk geometry")
+            self._geom = geom
+        return self._geom
+
+    def seek(self, offset):
+        high = wintypes.DWORD(offset >> 32)
+        low = wintypes.DWORD(offset & 0xFFFFFFFFL)
+
+        ret_val = kernel32.SetFilePointer(self._handle, low,
+                                          ctypes.byref(high),
+                                          self.FILE_BEGIN)
+        if ret_val == self.INVALID_SET_FILE_POINTER:
+            raise Exception("Seek error")
+
+    def read(self, bytes_to_read):
+        buf = ctypes.create_string_buffer(bytes_to_read)
+        bytes_read = wintypes.DWORD()
+        ret_val = kernel32.ReadFile(self._handle, buf, bytes_to_read,
+                                    ctypes.byref(bytes_read), 0)
+        if not ret_val:
+            raise Exception("Read exception")
+        return (buf, bytes_read.value)
diff --git a/cloudbaseinit/metadata/services/configdrive/windows/disk/virtual_disk.py b/cloudbaseinit/metadata/services/configdrive/windows/disk/virtual_disk.py
new file mode 100644
index 00000000..c1861109
--- /dev/null
+++ b/cloudbaseinit/metadata/services/configdrive/windows/disk/virtual_disk.py
@@ -0,0 +1,133 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
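+
+# Typical usage of the VirtualDisk class defined below, mirroring
+# ConfigDriveManager._copy_iso_files (a sketch; the ISO path is an
+# example):
+#
+#     virt_disk = VirtualDisk('C:\\temp\\config.iso')
+#     virt_disk.open()
+#     virt_disk.attach()
+#     mount_point = virt_disk.get_cdrom_drive_mount_point()
+#     # ... copy the config drive files from mount_point ...
+#     virt_disk.detach()
+#     virt_disk.close()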
+
+import ctypes
+
+from ctypes import windll
+from ctypes import wintypes
+
+kernel32 = windll.kernel32
+virtdisk = windll.virtdisk
+
+
+class Win32_GUID(ctypes.Structure):
+    _fields_ = [("Data1", wintypes.DWORD),
+                ("Data2", wintypes.WORD),
+                ("Data3", wintypes.WORD),
+                ("Data4", wintypes.BYTE * 8)]
+
+
+def get_WIN32_VIRTUAL_STORAGE_TYPE_VENDOR_MICROSOFT():
+    guid = Win32_GUID()
+    guid.Data1 = 0xec984aec
+    guid.Data2 = 0xa0f9
+    guid.Data3 = 0x47e9
+    ByteArray8 = wintypes.BYTE * 8
+    guid.Data4 = ByteArray8(0x90, 0x1f, 0x71, 0x41, 0x5a, 0x66, 0x34, 0x5b)
+    return guid
+
+
+class Win32_VIRTUAL_STORAGE_TYPE(ctypes.Structure):
+    _fields_ = [
+        ('DeviceId', wintypes.DWORD),
+        ('VendorId', Win32_GUID)
+    ]
+
+
+class VirtualDisk(object):
+    VIRTUAL_STORAGE_TYPE_DEVICE_ISO = 1
+    VIRTUAL_DISK_ACCESS_ATTACH_RO = 0x10000
+    VIRTUAL_DISK_ACCESS_READ = 0xd0000
+    OPEN_VIRTUAL_DISK_FLAG_NONE = 0
+    DETACH_VIRTUAL_DISK_FLAG_NONE = 0
+    ATTACH_VIRTUAL_DISK_FLAG_READ_ONLY = 1
+    ATTACH_VIRTUAL_DISK_FLAG_NO_DRIVE_LETTER = 2
+
+    def __init__(self, path):
+        self._path = path
+        self._handle = 0
+
+    def open(self):
+        if self._handle:
+            self.close()
+
+        vst = Win32_VIRTUAL_STORAGE_TYPE()
+        vst.DeviceId = self.VIRTUAL_STORAGE_TYPE_DEVICE_ISO
+        vst.VendorId = get_WIN32_VIRTUAL_STORAGE_TYPE_VENDOR_MICROSOFT()
+
+        handle = wintypes.HANDLE()
+        ret_val = virtdisk.OpenVirtualDisk(
+            ctypes.byref(vst),
+            ctypes.c_wchar_p(self._path),
+            self.VIRTUAL_DISK_ACCESS_ATTACH_RO |
+            self.VIRTUAL_DISK_ACCESS_READ,
+            self.OPEN_VIRTUAL_DISK_FLAG_NONE, 0, ctypes.byref(handle))
+        if ret_val:
+            raise Exception("Cannot open virtual disk")
+        self._handle = handle
+
+    def attach(self):
+        ret_val = virtdisk.AttachVirtualDisk(
+            self._handle, 0, self.ATTACH_VIRTUAL_DISK_FLAG_READ_ONLY,
+            0, 0, 0)
+        if ret_val:
+            raise Exception("Cannot attach virtual disk")
+
+    def detach(self):
+        ret_val = virtdisk.DetachVirtualDisk(
+            self._handle, self.DETACH_VIRTUAL_DISK_FLAG_NONE, 0)
+        if ret_val:
+            raise Exception("Cannot detach virtual disk")
+
+    def get_physical_path(self):
+        buf = ctypes.create_unicode_buffer(1024)
+        buf_len = wintypes.DWORD(ctypes.sizeof(buf))
+        ret_val = virtdisk.GetVirtualDiskPhysicalPath(self._handle,
+                                                      ctypes.byref(buf_len),
+                                                      buf)
+        if ret_val:
+            raise Exception("Cannot get virtual disk physical path")
+        return buf.value
+
+    def get_cdrom_drive_mount_point(self):
+        mount_point = None
+
+        buf = ctypes.create_unicode_buffer(2048)
+        buf_len = kernel32.GetLogicalDriveStringsW(
+            ctypes.sizeof(buf) / ctypes.sizeof(wintypes.WCHAR), buf)
+        if not buf_len:
+            raise Exception("Cannot enumerate logical devices")
+
+        cdrom_dev = self.get_physical_path().rsplit('\\')[-1].upper()
+
+        i = 0
+        while not mount_point and i < buf_len:
+            curr_drive = ctypes.wstring_at(ctypes.addressof(buf) +
+                                           i * ctypes.sizeof(
+                                               wintypes.WCHAR))[:-1]
+
+            dev = ctypes.create_unicode_buffer(2048)
+            ret_val = kernel32.QueryDosDeviceW(
+                curr_drive, dev,
+                ctypes.sizeof(dev) / ctypes.sizeof(wintypes.WCHAR))
+            if not ret_val:
+                raise Exception("Cannot query NT device")
+
+            if dev.value.rsplit('\\')[-1].upper() == cdrom_dev:
+                mount_point = curr_drive
+            else:
+                i += len(curr_drive) + 2
+
+        return mount_point
+
+    def close(self):
+        kernel32.CloseHandle(self._handle)
+        self._handle = 0
diff --git a/cloudbaseinit/openstack/__init__.py b/cloudbaseinit/openstack/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/openstack/common/__init__.py b/cloudbaseinit/openstack/common/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git
a/cloudbaseinit/openstack/common/cfg.py b/cloudbaseinit/openstack/common/cfg.py new file mode 100644 index 00000000..91ce150e --- /dev/null +++ b/cloudbaseinit/openstack/common/cfg.py @@ -0,0 +1,1686 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +Configuration options which may be set on the command line or in config files. + +The schema for each option is defined using the Opt sub-classes, e.g.: + +:: + + common_opts = [ + cfg.StrOpt('bind_host', + default='0.0.0.0', + help='IP address to listen on'), + cfg.IntOpt('bind_port', + default=9292, + help='Port number to listen on') + ] + +Options can be strings, integers, floats, booleans, lists or 'multi strings':: + + enabled_apis_opt = cfg.ListOpt('enabled_apis', + default=['ec2', 'osapi_compute'], + help='List of APIs to enable by default') + + DEFAULT_EXTENSIONS = [ + 'nova.api.openstack.compute.contrib.standard_extensions' + ] + osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension', + default=DEFAULT_EXTENSIONS) + +Option schemas are registered with the config manager at runtime, but before +the option is referenced:: + + class ExtensionManager(object): + + enabled_apis_opt = cfg.ListOpt(...) + + def __init__(self, conf): + self.conf = conf + self.conf.register_opt(enabled_apis_opt) + ... + + def _load_extensions(self): + for ext_factory in self.conf.osapi_compute_extension: + .... + +A common usage pattern is for each option schema to be defined in the module or +class which uses the option:: + + opts = ... + + def add_common_opts(conf): + conf.register_opts(opts) + + def get_bind_host(conf): + return conf.bind_host + + def get_bind_port(conf): + return conf.bind_port + +An option may optionally be made available via the command line. Such options +must registered with the config manager before the command line is parsed (for +the purposes of --help and CLI arg validation):: + + cli_opts = [ + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output'), + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output'), + ] + + def add_common_opts(conf): + conf.register_cli_opts(cli_opts) + +The config manager has two CLI options defined by default, --config-file +and --config-dir:: + + class ConfigOpts(object): + + def __call__(self, ...): + + opts = [ + MultiStrOpt('config-file', + ...), + StrOpt('config-dir', + ...), + ] + + self.register_cli_opts(opts) + +Option values are parsed from any supplied config files using +openstack.common.iniparser. If none are specified, a default set is used +e.g. glance-api.conf and glance-common.conf:: + + glance-api.conf: + [DEFAULT] + bind_port = 9292 + + glance-common.conf: + [DEFAULT] + bind_host = 0.0.0.0 + +Option values in config files override those on the command line. Config files +are parsed in order, with values in later files overriding those in earlier +files. 
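+
+For example, if bind_port were set in both glance-api.conf and
+glance-common.conf above, the value from the file parsed last would take
+precedence.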
+ +The parsing of CLI args and config files is initiated by invoking the config +manager e.g.:: + + conf = ConfigOpts() + conf.register_opt(BoolOpt('verbose', ...)) + conf(sys.argv[1:]) + if conf.verbose: + ... + +Options can be registered as belonging to a group:: + + rabbit_group = cfg.OptGroup(name='rabbit', + title='RabbitMQ options') + + rabbit_host_opt = cfg.StrOpt('host', + default='localhost', + help='IP/hostname to listen on'), + rabbit_port_opt = cfg.IntOpt('port', + default=5672, + help='Port number to listen on') + + def register_rabbit_opts(conf): + conf.register_group(rabbit_group) + # options can be registered under a group in either of these ways: + conf.register_opt(rabbit_host_opt, group=rabbit_group) + conf.register_opt(rabbit_port_opt, group='rabbit') + +If it no group attributes are required other than the group name, the group +need not be explicitly registered e.g. + + def register_rabbit_opts(conf): + # The group will automatically be created, equivalent calling:: + # conf.register_group(OptGroup(name='rabbit')) + conf.register_opt(rabbit_port_opt, group='rabbit') + +If no group is specified, options belong to the 'DEFAULT' section of config +files:: + + glance-api.conf: + [DEFAULT] + bind_port = 9292 + ... + + [rabbit] + host = localhost + port = 5672 + use_ssl = False + userid = guest + password = guest + virtual_host = / + +Command-line options in a group are automatically prefixed with the +group name:: + + --rabbit-host localhost --rabbit-port 9999 + +Option values in the default group are referenced as attributes/properties on +the config manager; groups are also attributes on the config manager, with +attributes for each of the options associated with the group:: + + server.start(app, conf.bind_port, conf.bind_host, conf) + + self.connection = kombu.connection.BrokerConnection( + hostname=conf.rabbit.host, + port=conf.rabbit.port, + ...) + +Option values may reference other values using PEP 292 string substitution:: + + opts = [ + cfg.StrOpt('state_path', + default=os.path.join(os.path.dirname(__file__), '../'), + help='Top-level directory for maintaining nova state'), + cfg.StrOpt('sqlite_db', + default='nova.sqlite', + help='file name for sqlite'), + cfg.StrOpt('sql_connection', + default='sqlite:///$state_path/$sqlite_db', + help='connection string for sql database'), + ] + +Note that interpolation can be avoided by using '$$'. + +FIXME(markmc): document add_cli_subparsers() + +Options may be declared as required so that an error is raised if the user +does not supply a value for the option. + +Options may be declared as secret so that their values are not leaked into +log files:: + + opts = [ + cfg.StrOpt('s3_store_access_key', secret=True), + cfg.StrOpt('s3_store_secret_key', secret=True), + ... 
+ ] + +This module also contains a global instance of the CommonConfigOpts class +in order to support a common usage pattern in OpenStack:: + + from cloudbaseinit.openstack.common import cfg + + opts = [ + cfg.StrOpt('bind_host', default='0.0.0.0'), + cfg.IntOpt('bind_port', default=9292), + ] + + CONF = cfg.CONF + CONF.register_opts(opts) + + def start(server, app): + server.start(app, CONF.bind_port, CONF.bind_host) + +""" + +import argparse +import collections +import copy +import functools +import glob +import os +import string +import sys + +from cloudbaseinit.openstack.common import iniparser + + +class Error(Exception): + """Base class for cfg exceptions.""" + + def __init__(self, msg=None): + self.msg = msg + + def __str__(self): + return self.msg + + +class ArgsAlreadyParsedError(Error): + """Raised if a CLI opt is registered after parsing.""" + + def __str__(self): + ret = "arguments already parsed" + if self.msg: + ret += ": " + self.msg + return ret + + +class NoSuchOptError(Error, AttributeError): + """Raised if an opt which doesn't exist is referenced.""" + + def __init__(self, opt_name, group=None): + self.opt_name = opt_name + self.group = group + + def __str__(self): + if self.group is None: + return "no such option: %s" % self.opt_name + else: + return "no such option in group %s: %s" % (self.group.name, + self.opt_name) + + +class NoSuchGroupError(Error): + """Raised if a group which doesn't exist is referenced.""" + + def __init__(self, group_name): + self.group_name = group_name + + def __str__(self): + return "no such group: %s" % self.group_name + + +class DuplicateOptError(Error): + """Raised if multiple opts with the same name are registered.""" + + def __init__(self, opt_name): + self.opt_name = opt_name + + def __str__(self): + return "duplicate option: %s" % self.opt_name + + +class RequiredOptError(Error): + """Raised if an option is required but no value is supplied by the user.""" + + def __init__(self, opt_name, group=None): + self.opt_name = opt_name + self.group = group + + def __str__(self): + if self.group is None: + return "value required for option: %s" % self.opt_name + else: + return "value required for option: %s.%s" % (self.group.name, + self.opt_name) + + +class TemplateSubstitutionError(Error): + """Raised if an error occurs substituting a variable in an opt value.""" + + def __str__(self): + return "template substitution error: %s" % self.msg + + +class ConfigFilesNotFoundError(Error): + """Raised if one or more config files are not found.""" + + def __init__(self, config_files): + self.config_files = config_files + + def __str__(self): + return ('Failed to read some config files: %s' % + string.join(self.config_files, ',')) + + +class ConfigFileParseError(Error): + """Raised if there is an error parsing a config file.""" + + def __init__(self, config_file, msg): + self.config_file = config_file + self.msg = msg + + def __str__(self): + return 'Failed to parse %s: %s' % (self.config_file, self.msg) + + +class ConfigFileValueError(Error): + """Raised if a config file value does not match its opt type.""" + pass + + +def _fixpath(p): + """Apply tilde expansion and absolutization to a path.""" + return os.path.abspath(os.path.expanduser(p)) + + +def _get_config_dirs(project=None): + """Return a list of directors where config files may be located. 
+ + :param project: an optional project name + + If a project is specified, following directories are returned:: + + ~/.${project}/ + ~/ + /etc/${project}/ + /etc/ + + Otherwise, these directories:: + + ~/ + /etc/ + """ + cfg_dirs = [ + _fixpath(os.path.join('~', '.' + project)) if project else None, + _fixpath('~'), + os.path.join('/etc', project) if project else None, + '/etc' + ] + + return filter(bool, cfg_dirs) + + +def _search_dirs(dirs, basename, extension=""): + """Search a list of directories for a given filename. + + Iterator over the supplied directories, returning the first file + found with the supplied name and extension. + + :param dirs: a list of directories + :param basename: the filename, e.g. 'glance-api' + :param extension: the file extension, e.g. '.conf' + :returns: the path to a matching file, or None + """ + for d in dirs: + path = os.path.join(d, '%s%s' % (basename, extension)) + if os.path.exists(path): + return path + + +def find_config_files(project=None, prog=None, extension='.conf'): + """Return a list of default configuration files. + + :param project: an optional project name + :param prog: the program name, defaulting to the basename of sys.argv[0] + :param extension: the type of the config file + + We default to two config files: [${project}.conf, ${prog}.conf] + + And we look for those config files in the following directories:: + + ~/.${project}/ + ~/ + /etc/${project}/ + /etc/ + + We return an absolute path for (at most) one of each the default config + files, for the topmost directory it exists in. + + For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf + and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf', + '~/.foo/bar.conf'] + + If no project name is supplied, we only look for ${prog.conf}. + """ + if prog is None: + prog = os.path.basename(sys.argv[0]) + + cfg_dirs = _get_config_dirs(project) + + config_files = [] + if project: + config_files.append(_search_dirs(cfg_dirs, project, extension)) + config_files.append(_search_dirs(cfg_dirs, prog, extension)) + + return filter(bool, config_files) + + +def _is_opt_registered(opts, opt): + """Check whether an opt with the same name is already registered. + + The same opt may be registered multiple times, with only the first + registration having any effect. However, it is an error to attempt + to register a different opt with the same name. + + :param opts: the set of opts already registered + :param opt: the opt to be registered + :returns: True if the opt was previously registered, False otherwise + :raises: DuplicateOptError if a naming conflict is detected + """ + if opt.dest in opts: + if opts[opt.dest]['opt'] != opt: + raise DuplicateOptError(opt.name) + return True + else: + return False + + +class Opt(object): + + """Base class for all configuration options. 
+ + An Opt object has no public methods, but has a number of public string + properties: + + name: + the name of the option, which may include hyphens + dest: + the (hyphen-less) ConfigOpts property which contains the option value + short: + a single character CLI option name + default: + the default value of the option + positional: + True if the option is a positional CLI argument + metavar: + the name shown as the argument to a CLI option in --help output + help: + an string explaining how the options value is used + """ + multi = False + + def __init__(self, name, dest=None, short=None, default=None, + positional=False, metavar=None, help=None, + secret=False, required=False, deprecated_name=None): + """Construct an Opt object. + + The only required parameter is the option's name. However, it is + common to also supply a default and help string for all options. + + :param name: the option's name + :param dest: the name of the corresponding ConfigOpts property + :param short: a single character CLI option name + :param default: the default value of the option + :param positional: True if the option is a positional CLI argument + :param metavar: the option argument to show in --help + :param help: an explanation of how the option is used + :param secret: true iff the value should be obfuscated in log output + :param required: true iff a value must be supplied for this option + :param deprecated_name: deprecated name option. Acts like an alias + """ + self.name = name + if dest is None: + self.dest = self.name.replace('-', '_') + else: + self.dest = dest + self.short = short + self.default = default + self.positional = positional + self.metavar = metavar + self.help = help + self.secret = secret + self.required = required + if deprecated_name is not None: + self.deprecated_name = deprecated_name.replace('-', '_') + else: + self.deprecated_name = None + + def __ne__(self, another): + return vars(self) != vars(another) + + def _get_from_config_parser(self, cparser, section): + """Retrieves the option value from a MultiConfigParser object. + + This is the method ConfigOpts uses to look up the option value from + config files. Most opt types override this method in order to perform + type appropriate conversion of the returned value. + + :param cparser: a ConfigParser object + :param section: a section name + """ + return self._cparser_get_with_deprecated(cparser, section) + + def _cparser_get_with_deprecated(self, cparser, section): + """If cannot find option as dest try deprecated_name alias.""" + if self.deprecated_name is not None: + return cparser.get(section, [self.dest, self.deprecated_name]) + return cparser.get(section, [self.dest]) + + def _add_to_cli(self, parser, group=None): + """Makes the option available in the command line interface. + + This is the method ConfigOpts uses to add the opt to the CLI interface + as appropriate for the opt type. Some opt types may extend this method, + others may just extend the helper methods it uses. + + :param parser: the CLI option parser + :param group: an optional OptGroup object + """ + container = self._get_argparse_container(parser, group) + kwargs = self._get_argparse_kwargs(group) + prefix = self._get_argparse_prefix('', group) + self._add_to_argparse(container, self.name, self.short, kwargs, prefix, + self.positional, self.deprecated_name) + + def _add_to_argparse(self, container, name, short, kwargs, prefix='', + positional=False, deprecated_name=None): + """Add an option to an argparse parser or group. 
+ + :param container: an argparse._ArgumentGroup object + :param name: the opt name + :param short: the short opt name + :param kwargs: the keyword arguments for add_argument() + :param prefix: an optional prefix to prepend to the opt name + :param position: whether the optional is a positional CLI argument + :raises: DuplicateOptError if a naming confict is detected + """ + def hyphen(arg): + return arg if not positional else '' + + args = [hyphen('--') + prefix + name] + if short: + args.append(hyphen('-') + short) + if deprecated_name: + args.append(hyphen('--') + prefix + deprecated_name) + + try: + container.add_argument(*args, **kwargs) + except argparse.ArgumentError as e: + raise DuplicateOptError(e) + + def _get_argparse_container(self, parser, group): + """Returns an argparse._ArgumentGroup. + + :param parser: an argparse.ArgumentParser + :param group: an (optional) OptGroup object + :returns: an argparse._ArgumentGroup if group is given, else parser + """ + if group is not None: + return group._get_argparse_group(parser) + else: + return parser + + def _get_argparse_kwargs(self, group, **kwargs): + """Build a dict of keyword arguments for argparse's add_argument(). + + Most opt types extend this method to customize the behaviour of the + options added to argparse. + + :param group: an optional group + :param kwargs: optional keyword arguments to add to + :returns: a dict of keyword arguments + """ + if not self.positional: + dest = self.dest + if group is not None: + dest = group.name + '_' + dest + kwargs['dest'] = dest + else: + kwargs['nargs'] = '?' + kwargs.update({'metavar': self.metavar, + 'help': self.help, }) + return kwargs + + def _get_argparse_prefix(self, prefix, group): + """Build a prefix for the CLI option name, if required. + + CLI options in a group are prefixed with the group's name in order + to avoid conflicts between similarly named options in different + groups. + + :param prefix: an existing prefix to append to (e.g. 'no' or '') + :param group: an optional OptGroup object + :returns: a CLI option prefix including the group name, if appropriate + """ + if group is not None: + return group.name + '-' + prefix + else: + return prefix + + +class StrOpt(Opt): + """ + String opts do not have their values transformed and are returned as + str objects. + """ + pass + + +class BoolOpt(Opt): + + """ + Bool opts are set to True or False on the command line using --optname or + --noopttname respectively. + + In config files, boolean values are case insensitive and can be set using + 1/0, yes/no, true/false or on/off. 
+ """ + + _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False} + + def __init__(self, *args, **kwargs): + if 'positional' in kwargs: + raise ValueError('positional boolean args not supported') + super(BoolOpt, self).__init__(*args, **kwargs) + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a boolean from ConfigParser.""" + def convert_bool(v): + value = self._boolean_states.get(v.lower()) + if value is None: + raise ValueError('Unexpected boolean value %r' % v) + + return value + + return [convert_bool(v) for v in + self._cparser_get_with_deprecated(cparser, section)] + + def _add_to_cli(self, parser, group=None): + """Extends the base class method to add the --nooptname option.""" + super(BoolOpt, self)._add_to_cli(parser, group) + self._add_inverse_to_argparse(parser, group) + + def _add_inverse_to_argparse(self, parser, group): + """Add the --nooptname option to the option parser.""" + container = self._get_argparse_container(parser, group) + kwargs = self._get_argparse_kwargs(group, action='store_false') + prefix = self._get_argparse_prefix('no', group) + kwargs["help"] = "The inverse of --" + self.name + self._add_to_argparse(container, self.name, None, kwargs, prefix, + self.positional, self.deprecated_name) + + def _get_argparse_kwargs(self, group, action='store_true', **kwargs): + """Extends the base argparse keyword dict for boolean options.""" + + kwargs = super(BoolOpt, self)._get_argparse_kwargs(group, **kwargs) + + # metavar has no effect for BoolOpt + if 'metavar' in kwargs: + del kwargs['metavar'] + + if action != 'store_true': + action = 'store_false' + + kwargs['action'] = action + + return kwargs + + +class IntOpt(Opt): + + """Int opt values are converted to integers using the int() builtin.""" + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a integer from ConfigParser.""" + return [int(v) for v in self._cparser_get_with_deprecated(cparser, + section)] + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for integer options.""" + return super(IntOpt, + self)._get_argparse_kwargs(group, type=int, **kwargs) + + +class FloatOpt(Opt): + + """Float opt values are converted to floats using the float() builtin.""" + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a float from ConfigParser.""" + return [float(v) for v in + self._cparser_get_with_deprecated(cparser, section)] + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for float options.""" + return super(FloatOpt, self)._get_argparse_kwargs(group, + type=float, **kwargs) + + +class ListOpt(Opt): + + """ + List opt values are simple string values separated by commas. The opt value + is a list containing these strings. + """ + + class _StoreListAction(argparse.Action): + """ + An argparse action for parsing an option value into a list. 
+ """ + def __call__(self, parser, namespace, values, option_string=None): + if values is not None: + values = [a.strip() for a in values.split(',')] + setattr(namespace, self.dest, values) + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a list from ConfigParser.""" + return [v.split(',') for v in + self._cparser_get_with_deprecated(cparser, section)] + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for list options.""" + return Opt._get_argparse_kwargs(self, + group, + action=ListOpt._StoreListAction, + **kwargs) + + +class MultiStrOpt(Opt): + + """ + Multistr opt values are string opts which may be specified multiple times. + The opt value is a list containing all the string values specified. + """ + multi = True + + def _get_argparse_kwargs(self, group, **kwargs): + """Extends the base argparse keyword dict for multi str options.""" + kwargs = super(MultiStrOpt, self)._get_argparse_kwargs(group) + if not self.positional: + kwargs['action'] = 'append' + else: + kwargs['nargs'] = '*' + return kwargs + + def _cparser_get_with_deprecated(self, cparser, section): + """If cannot find option as dest try deprecated_name alias.""" + if self.deprecated_name is not None: + return cparser.get(section, [self.dest, self.deprecated_name], + multi=True) + return cparser.get(section, [self.dest], multi=True) + + +class OptGroup(object): + + """ + Represents a group of opts. + + CLI opts in the group are automatically prefixed with the group name. + + Each group corresponds to a section in config files. + + An OptGroup object has no public methods, but has a number of public string + properties: + + name: + the name of the group + title: + the group title as displayed in --help + help: + the group description as displayed in --help + """ + + def __init__(self, name, title=None, help=None): + """Constructs an OptGroup object. + + :param name: the group name + :param title: the group title for --help + :param help: the group description for --help + """ + self.name = name + if title is None: + self.title = "%s options" % title + else: + self.title = title + self.help = help + + self._opts = {} # dict of dicts of (opt:, override:, default:) + self._argparse_group = None + + def _register_opt(self, opt, cli=False): + """Add an opt to this group. + + :param opt: an Opt object + :param cli: whether this is a CLI option + :returns: False if previously registered, True otherwise + :raises: DuplicateOptError if a naming conflict is detected + """ + if _is_opt_registered(self._opts, opt): + return False + + self._opts[opt.dest] = {'opt': opt, 'cli': cli} + + return True + + def _unregister_opt(self, opt): + """Remove an opt from this group. 
+ + :param opt: an Opt object + """ + if opt.dest in self._opts: + del self._opts[opt.dest] + + def _get_argparse_group(self, parser): + if self._argparse_group is None: + """Build an argparse._ArgumentGroup for this group.""" + self._argparse_group = parser.add_argument_group(self.title, + self.help) + return self._argparse_group + + def _clear(self): + """Clear this group's option parsing state.""" + self._argparse_group = None + + +class ParseError(iniparser.ParseError): + def __init__(self, msg, lineno, line, filename): + super(ParseError, self).__init__(msg, lineno, line) + self.filename = filename + + def __str__(self): + return 'at %s:%d, %s: %r' % (self.filename, self.lineno, + self.msg, self.line) + + +class ConfigParser(iniparser.BaseParser): + def __init__(self, filename, sections): + super(ConfigParser, self).__init__() + self.filename = filename + self.sections = sections + self.section = None + + def parse(self): + with open(self.filename) as f: + return super(ConfigParser, self).parse(f) + + def new_section(self, section): + self.section = section + self.sections.setdefault(self.section, {}) + + def assignment(self, key, value): + if not self.section: + raise self.error_no_section() + + self.sections[self.section].setdefault(key, []) + self.sections[self.section][key].append('\n'.join(value)) + + def parse_exc(self, msg, lineno, line=None): + return ParseError(msg, lineno, line, self.filename) + + def error_no_section(self): + return self.parse_exc('Section must be started before assignment', + self.lineno) + + +class MultiConfigParser(object): + def __init__(self): + self.parsed = [] + + def read(self, config_files): + read_ok = [] + + for filename in config_files: + sections = {} + parser = ConfigParser(filename, sections) + + try: + parser.parse() + except IOError: + continue + self.parsed.insert(0, sections) + read_ok.append(filename) + + return read_ok + + def get(self, section, names, multi=False): + rvalue = [] + for sections in self.parsed: + if section not in sections: + continue + for name in names: + if name in sections[section]: + if multi: + rvalue = sections[section][name] + rvalue + else: + return sections[section][name] + if multi and rvalue != []: + return rvalue + raise KeyError + + +class ConfigOpts(collections.Mapping): + + """ + Config options which may be set on the command line or in config files. + + ConfigOpts is a configuration option manager with APIs for registering + option schemas, grouping options, parsing option values and retrieving + the values of options. 
+ """ + + def __init__(self): + """Construct a ConfigOpts object.""" + self._opts = {} # dict of dicts of (opt:, override:, default:) + self._groups = {} + + self._args = None + + # for subparser support, a root parser should be initialized earlier + # and conserved for later use + self._pre_init_parser = None + self._oparser = None + self._cparser = None + self._cli_values = {} + self.__cache = {} + self._config_opts = [] + + def _pre_setup(self, project, prog, version, usage, default_config_files): + """Initialize a ConfigCliParser object for option parsing.""" + + if prog is None: + prog = os.path.basename(sys.argv[0]) + + if default_config_files is None: + default_config_files = find_config_files(project, prog) + + # if _pre_init_parser does not exist, create one + if self._pre_init_parser is None: + self._oparser = argparse.ArgumentParser(prog=prog, usage=usage) + # otherwise, use the pre-initialized parser with subparsers + # and re-initialize parser + else: + self._oparser = self._pre_init_parser + self._oparser.prog = prog + self._oparser.version = version + self._oparser.usage = usage + self._pre_init_parser = None + + self._oparser.add_argument('--version', + action='version', + version=version) + + return prog, default_config_files + + def _setup(self, project, prog, version, usage, default_config_files): + """Initialize a ConfigOpts object for option parsing.""" + + self._config_opts = [ + MultiStrOpt('config-file', + default=default_config_files, + metavar='PATH', + help='Path to a config file to use. Multiple config ' + 'files can be specified, with values in later ' + 'files taking precedence. The default files ' + ' used are: %s' % (default_config_files, )), + StrOpt('config-dir', + metavar='DIR', + help='Path to a config directory to pull *.conf ' + 'files from. This file set is sorted, so as to ' + 'provide a predictable parse order if individual ' + 'options are over-ridden. The set is parsed after ' + 'the file(s), if any, specified via --config-file, ' + 'hence over-ridden options in the directory take ' + 'precedence.'), + ] + self.register_cli_opts(self._config_opts) + + self.project = project + self.prog = prog + self.version = version + self.usage = usage + self.default_config_files = default_config_files + + def __clear_cache(f): + @functools.wraps(f) + def __inner(self, *args, **kwargs): + if kwargs.pop('clear_cache', True): + self.__cache.clear() + return f(self, *args, **kwargs) + + return __inner + + def __call__(self, + args=None, + project=None, + prog=None, + version=None, + usage=None, + default_config_files=None): + """Parse command line arguments and config files. + + Calling a ConfigOpts object causes the supplied command line arguments + and config files to be parsed, causing opt values to be made available + as attributes of the object. + + The object may be called multiple times, each time causing the previous + set of values to be overwritten. + + Automatically registers the --config-file option with either a supplied + list of default config files, or a list from find_config_files(). + + If the --config-dir option is set, any *.conf files from this + directory are pulled in, after all the file(s) specified by the + --config-file option. 
+ + :param args: command line arguments (defaults to sys.argv[1:]) + :param project: the toplevel project name, used to locate config files + :param prog: the name of the program (defaults to sys.argv[0] basename) + :param version: the program version (for --version) + :param usage: a usage string (%prog will be expanded) + :param default_config_files: config files to use by default + :returns: the list of arguments left over after parsing options + :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError, + RequiredOptError, DuplicateOptError + """ + + self.clear() + + prog, default_config_files = self._pre_setup(project, + prog, + version, + usage, + default_config_files) + + self._setup(project, prog, version, usage, default_config_files) + + self._cli_values = self._parse_cli_opts(args) + + self._parse_config_files() + + self._check_required_opts() + + def __getattr__(self, name): + """Look up an option value and perform string substitution. + + :param name: the opt name (or 'dest', more precisely) + :returns: the option value (after string subsititution) or a GroupAttr + :raises: NoSuchOptError,ConfigFileValueError,TemplateSubstitutionError + """ + return self._get(name) + + def __getitem__(self, key): + """Look up an option value and perform string substitution.""" + return self.__getattr__(key) + + def __contains__(self, key): + """Return True if key is the name of a registered opt or group.""" + return key in self._opts or key in self._groups + + def __iter__(self): + """Iterate over all registered opt and group names.""" + for key in self._opts.keys() + self._groups.keys(): + yield key + + def __len__(self): + """Return the number of options and option groups.""" + return len(self._opts) + len(self._groups) + + def add_cli_subparsers(self, **kwargs): + # only add subparsers to pre-initialized root parser + # to avoid cleared by self.clear() + if self._pre_init_parser is None: + self._pre_init_parser = argparse.ArgumentParser() + return self._pre_init_parser.add_subparsers(**kwargs) + + def reset(self): + """Clear the object state and unset overrides and defaults.""" + self._unset_defaults_and_overrides() + self.clear() + + @__clear_cache + def clear(self): + """Clear the state of the object to before it was called.""" + self._args = None + self._cli_values.clear() + self._oparser = None + self._cparser = None + self.unregister_opts(self._config_opts) + for group in self._groups.values(): + group._clear() + + @__clear_cache + def register_opt(self, opt, group=None, cli=False): + """Register an option schema. + + Registering an option schema makes any option value which is previously + or subsequently parsed from the command line or config files available + as an attribute of this object. + + :param opt: an instance of an Opt sub-class + :param cli: whether this is a CLI option + :param group: an optional OptGroup object or group name + :return: False if the opt was already register, True otherwise + :raises: DuplicateOptError + """ + if group is not None: + group = self._get_group(group, autocreate=True) + return group._register_opt(opt, cli) + + if _is_opt_registered(self._opts, opt): + return False + + self._opts[opt.dest] = {'opt': opt, 'cli': cli} + + return True + + @__clear_cache + def register_opts(self, opts, group=None): + """Register multiple option schemas at once.""" + for opt in opts: + self.register_opt(opt, group, clear_cache=False) + + @__clear_cache + def register_cli_opt(self, opt, group=None): + """Register a CLI option schema. 
+
+    @__clear_cache
+    def register_cli_opt(self, opt, group=None):
+        """Register a CLI option schema.
+
+        CLI option schemas must be registered before the command line and
+        config files are parsed. This is to ensure that all CLI options are
+        shown in --help and option validation works as expected.
+
+        :param opt: an instance of an Opt sub-class
+        :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+        :raises: DuplicateOptError, ArgsAlreadyParsedError
+        """
+        if self._args is not None:
+            raise ArgsAlreadyParsedError("cannot register CLI option")
+
+        return self.register_opt(opt, group, cli=True, clear_cache=False)
+
+    @__clear_cache
+    def register_cli_opts(self, opts, group=None):
+        """Register multiple CLI option schemas at once."""
+        for opt in opts:
+            self.register_cli_opt(opt, group, clear_cache=False)
+
+    def register_group(self, group):
+        """Register an option group.
+
+        An option group must be registered before options can be registered
+        with the group.
+
+        :param group: an OptGroup object
+        """
+        if group.name in self._groups:
+            return
+
+        self._groups[group.name] = copy.copy(group)
+
+    @__clear_cache
+    def unregister_opt(self, opt, group=None):
+        """Unregister an option.
+
+        :param opt: an Opt object
+        :param group: an optional OptGroup object or group name
+        :raises: ArgsAlreadyParsedError, NoSuchGroupError
+        """
+        if self._args is not None:
+            raise ArgsAlreadyParsedError("reset before unregistering options")
+
+        if group is not None:
+            self._get_group(group)._unregister_opt(opt)
+        elif opt.dest in self._opts:
+            del self._opts[opt.dest]
+
+    @__clear_cache
+    def unregister_opts(self, opts, group=None):
+        """Unregister multiple option schemas at once."""
+        for opt in opts:
+            self.unregister_opt(opt, group, clear_cache=False)
+
+    def import_opt(self, name, module_str, group=None):
+        """Import an option definition from a module.
+
+        Import a module and check that a given option is registered.
+
+        This is intended for use with global configuration objects
+        like cfg.CONF where modules commonly register options with
+        CONF at module load time. If one module requires an option
+        defined by another module it can use this method to explicitly
+        declare the dependency.
+
+        :param name: the name/dest of the opt
+        :param module_str: the name of a module to import
+        :param group: an optional OptGroup object or group name
+        :raises: NoSuchOptError, NoSuchGroupError
+        """
+        __import__(module_str)
+        self._get_opt_info(name, group)
+
+    @__clear_cache
+    def set_override(self, name, override, group=None):
+        """Override an opt value.
+
+        Override the command line, config file and default values of a
+        given option.
+
+        :param name: the name/dest of the opt
+        :param override: the override value
+        :param group: an optional OptGroup object or group name
+        :raises: NoSuchOptError, NoSuchGroupError
+        """
+        opt_info = self._get_opt_info(name, group)
+        opt_info['override'] = override
+
+    @__clear_cache
+    def set_default(self, name, default, group=None):
+        """Override an opt's default value.
+
+        Override the default value of a given option. A command line or
+        config file value will still take precedence over this default.
+
+        :param name: the name/dest of the opt
+        :param default: the default value
+        :param group: an optional OptGroup object or group name
+        :raises: NoSuchOptError, NoSuchGroupError
+        """
+        opt_info = self._get_opt_info(name, group)
+        opt_info['default'] = default
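[Editor's aside: the difference between set_override() and set_default() above is worth illustrating. A sketch, assuming 'rabbit_host' was registered as in the earlier example.]

    cfg.CONF.set_default('rabbit_host', 'rabbit.example.org')  # CLI/config win
    cfg.CONF.set_override('rabbit_host', 'localhost')          # beats them all

    print cfg.CONF.rabbit_host        # -> 'localhost'
    cfg.CONF.clear_override('rabbit_host')
    print cfg.CONF.rabbit_host        # -> back to the effective default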
+
+    @__clear_cache
+    def clear_override(self, name, group=None):
+        """Clear an override of an opt value.
+
+        Clear a previously set override of the command line, config file
+        and default values of a given option.
+
+        :param name: the name/dest of the opt
+        :param group: an optional OptGroup object or group name
+        :raises: NoSuchOptError, NoSuchGroupError
+        """
+        opt_info = self._get_opt_info(name, group)
+        opt_info.pop('override', None)
+
+    @__clear_cache
+    def clear_default(self, name, group=None):
+        """Clear an override of an opt's default value.
+
+        Clear a previously set override of the default value of a given
+        option.
+
+        :param name: the name/dest of the opt
+        :param group: an optional OptGroup object or group name
+        :raises: NoSuchOptError, NoSuchGroupError
+        """
+        opt_info = self._get_opt_info(name, group)
+        opt_info.pop('default', None)
+
+    def _all_opt_infos(self):
+        """A generator function for iterating over opt infos."""
+        for info in self._opts.values():
+            yield info, None
+        for group in self._groups.values():
+            for info in group._opts.values():
+                yield info, group
+
+    def _all_cli_opts(self):
+        """A generator function for iterating CLI opts."""
+        for info, group in self._all_opt_infos():
+            if info['cli']:
+                yield info['opt'], group
+
+    def _unset_defaults_and_overrides(self):
+        """Unset any default or override on all options."""
+        for info, group in self._all_opt_infos():
+            info.pop('default', None)
+            info.pop('override', None)
+
+    def find_file(self, name):
+        """Locate a file located alongside the config files.
+
+        Search for a file with the supplied basename in the directories
+        which we have already loaded config files from and other known
+        configuration directories.
+
+        The directory, if any, supplied by the config_dir option is
+        searched first. Then the config_file option is iterated over
+        and each of the base directories of the config_files values
+        are searched. Failing both of these, the standard directories
+        searched by the module level find_config_files() function are
+        used. The first matching file is returned.
+
+        :param name: the filename, e.g. 'policy.json'
+        :returns: the path to a matching file, or None
+        """
+        dirs = []
+        if self.config_dir:
+            dirs.append(_fixpath(self.config_dir))
+
+        for cf in reversed(self.config_file):
+            dirs.append(os.path.dirname(_fixpath(cf)))
+
+        dirs.extend(_get_config_dirs(self.project))
+
+        return _search_dirs(dirs, name)
+
+    def log_opt_values(self, logger, lvl):
+        """Log the value of all registered opts.
+
+        It's often useful for an app to log its configuration to a log file
+        at startup for debugging. This method dumps the entire config state
+        to the supplied logger at a given log level.
+
+        :param logger: a logging.Logger object
+        :param lvl: the log level (e.g.
logging.DEBUG) arg to logger.log() + """ + logger.log(lvl, "*" * 80) + logger.log(lvl, "Configuration options gathered from:") + logger.log(lvl, "command line args: %s", self._args) + logger.log(lvl, "config files: %s", self.config_file) + logger.log(lvl, "=" * 80) + + def _sanitize(opt, value): + """Obfuscate values of options declared secret""" + return value if not opt.secret else '*' * len(str(value)) + + for opt_name in sorted(self._opts): + opt = self._get_opt_info(opt_name)['opt'] + logger.log(lvl, "%-30s = %s", opt_name, + _sanitize(opt, getattr(self, opt_name))) + + for group_name in self._groups: + group_attr = self.GroupAttr(self, self._get_group(group_name)) + for opt_name in sorted(self._groups[group_name]._opts): + opt = self._get_opt_info(opt_name, group_name)['opt'] + logger.log(lvl, "%-30s = %s", + "%s.%s" % (group_name, opt_name), + _sanitize(opt, getattr(group_attr, opt_name))) + + logger.log(lvl, "*" * 80) + + def print_usage(self, file=None): + """Print the usage message for the current program.""" + self._oparser.print_usage(file) + + def print_help(self, file=None): + """Print the help message for the current program.""" + self._oparser.print_help(file) + + def _get(self, name, group=None): + if isinstance(group, OptGroup): + key = (group.name, name) + else: + key = (group, name) + try: + return self.__cache[key] + except KeyError: + value = self._substitute(self._do_get(name, group)) + self.__cache[key] = value + return value + + def _do_get(self, name, group=None): + """Look up an option value. + + :param name: the opt name (or 'dest', more precisely) + :param group: an OptGroup + :returns: the option value, or a GroupAttr object + :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError, + TemplateSubstitutionError + """ + if group is None and name in self._groups: + return self.GroupAttr(self, self._get_group(name)) + + info = self._get_opt_info(name, group) + opt = info['opt'] + + if 'override' in info: + return info['override'] + + values = [] + if self._cparser is not None: + section = group.name if group is not None else 'DEFAULT' + try: + value = opt._get_from_config_parser(self._cparser, section) + except KeyError: + pass + except ValueError as ve: + raise ConfigFileValueError(str(ve)) + else: + if not opt.multi: + # No need to continue since the last value wins + return value[-1] + values.extend(value) + + name = name if group is None else group.name + '_' + name + value = self._cli_values.get(name) + if value is not None: + if not opt.multi: + return value + + # argparse ignores default=None for nargs='*' + if opt.positional and not value: + value = opt.default + + return value + values + + if values: + return values + + if 'default' in info: + return info['default'] + + return opt.default + + def _substitute(self, value): + """Perform string template substitution. + + Substitute any template variables (e.g. $foo, ${bar}) in the supplied + string value(s) with opt values. + + :param value: the string value, or list of string values + :returns: the substituted string(s) + """ + if isinstance(value, list): + return [self._substitute(i) for i in value] + elif isinstance(value, str): + tmpl = string.Template(value) + return tmpl.safe_substitute(self.StrSubWrapper(self)) + else: + return value + + def _get_group(self, group_or_name, autocreate=False): + """Looks up a OptGroup object. + + Helper function to return an OptGroup given a parameter which can + either be the group's name or an OptGroup object. 
+ + The OptGroup object returned is from the internal dict of OptGroup + objects, which will be a copy of any OptGroup object that users of + the API have access to. + + :param group_or_name: the group's name or the OptGroup object itself + :param autocreate: whether to auto-create the group if it's not found + :raises: NoSuchGroupError + """ + group = group_or_name if isinstance(group_or_name, OptGroup) else None + group_name = group.name if group else group_or_name + + if not group_name in self._groups: + if not group is None or not autocreate: + raise NoSuchGroupError(group_name) + + self.register_group(OptGroup(name=group_name)) + + return self._groups[group_name] + + def _get_opt_info(self, opt_name, group=None): + """Return the (opt, override, default) dict for an opt. + + :param opt_name: an opt name/dest + :param group: an optional group name or OptGroup object + :raises: NoSuchOptError, NoSuchGroupError + """ + if group is None: + opts = self._opts + else: + group = self._get_group(group) + opts = group._opts + + if not opt_name in opts: + raise NoSuchOptError(opt_name, group) + + return opts[opt_name] + + def _parse_config_files(self): + """Parse the config files from --config-file and --config-dir. + + :raises: ConfigFilesNotFoundError, ConfigFileParseError + """ + config_files = list(self.config_file) + + if self.config_dir: + config_dir_glob = os.path.join(self.config_dir, '*.conf') + config_files += sorted(glob.glob(config_dir_glob)) + + config_files = [_fixpath(p) for p in config_files] + + self._cparser = MultiConfigParser() + + try: + read_ok = self._cparser.read(config_files) + except iniparser.ParseError as pe: + raise ConfigFileParseError(pe.filename, str(pe)) + + if read_ok != config_files: + not_read_ok = filter(lambda f: f not in read_ok, config_files) + raise ConfigFilesNotFoundError(not_read_ok) + + def _check_required_opts(self): + """Check that all opts marked as required have values specified. + + :raises: RequiredOptError + """ + for info, group in self._all_opt_infos(): + opt = info['opt'] + + if opt.required: + if ('default' in info or 'override' in info): + continue + + if self._get(opt.dest, group) is None: + raise RequiredOptError(opt.name, group) + + def _parse_cli_opts(self, args): + """Parse command line options. + + Initializes the command line option parser and parses the supplied + command line arguments. + + :param args: the command line arguments + :returns: a dict of parsed option values + :raises: SystemExit, DuplicateOptError + + """ + self._args = args + + for opt, group in self._all_cli_opts(): + opt._add_to_cli(self._oparser, group) + + return vars(self._oparser.parse_args(args)) + + class GroupAttr(collections.Mapping): + + """ + A helper class representing the option values of a group as a mapping + and attributes. + """ + + def __init__(self, conf, group): + """Construct a GroupAttr object. 
+ + :param conf: a ConfigOpts object + :param group: an OptGroup object + """ + self._conf = conf + self._group = group + + def __getattr__(self, name): + """Look up an option value and perform template substitution.""" + return self._conf._get(name, self._group) + + def __getitem__(self, key): + """Look up an option value and perform string substitution.""" + return self.__getattr__(key) + + def __contains__(self, key): + """Return True if key is the name of a registered opt or group.""" + return key in self._group._opts + + def __iter__(self): + """Iterate over all registered opt and group names.""" + for key in self._group._opts.keys(): + yield key + + def __len__(self): + """Return the number of options and option groups.""" + return len(self._group._opts) + + class StrSubWrapper(object): + + """ + A helper class exposing opt values as a dict for string substitution. + """ + + def __init__(self, conf): + """Construct a StrSubWrapper object. + + :param conf: a ConfigOpts object + """ + self.conf = conf + + def __getitem__(self, key): + """Look up an opt value from the ConfigOpts object. + + :param key: an opt name + :returns: an opt value + :raises: TemplateSubstitutionError if attribute is a group + """ + value = getattr(self.conf, key) + if isinstance(value, self.conf.GroupAttr): + raise TemplateSubstitutionError( + 'substituting group %s not supported' % key) + return value + + +class CommonConfigOpts(ConfigOpts): + + DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" + DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + common_cli_opts = [ + BoolOpt('debug', + short='d', + default=False, + help='Print debugging output'), + BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output'), + ] + + logging_cli_opts = [ + StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any other logging ' + 'options specified. Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + StrOpt('log-format', + default=DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %(default)s'), + StrOpt('log-date-format', + default=DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + StrOpt('log-file', + metavar='PATH', + help='(Optional) Name of log file to output to. ' + 'If not set, logging will go to stdout.'), + StrOpt('log-dir', + help='(Optional) The directory to keep log files in ' + '(will be prepended to --logfile)'), + BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') + ] + + def __init__(self): + super(CommonConfigOpts, self).__init__() + self.register_cli_opts(self.common_cli_opts) + self.register_cli_opts(self.logging_cli_opts) + + +CONF = CommonConfigOpts() diff --git a/cloudbaseinit/openstack/common/context.py b/cloudbaseinit/openstack/common/context.py new file mode 100644 index 00000000..dd7dd04c --- /dev/null +++ b/cloudbaseinit/openstack/common/context.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple class that stores security context information in the web request. + +Projects should subclass this class if they wish to enhance the request +context or provide additional information in their specific WSGI pipeline. +""" + +import itertools +import uuid + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + + +class RequestContext(object): + + """ + Stores information about the security context under which the user + accesses the system, as well as additional request information. + """ + + def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None): + self.auth_tok = auth_tok + self.user = user + self.tenant = tenant + self.is_admin = is_admin + self.read_only = read_only + self.show_deleted = show_deleted + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + + def to_dict(self): + return {'user': self.user, + 'tenant': self.tenant, + 'is_admin': self.is_admin, + 'read_only': self.read_only, + 'show_deleted': self.show_deleted, + 'auth_token': self.auth_tok, + 'request_id': self.request_id} + + +def get_admin_context(show_deleted="no"): + context = RequestContext(None, + tenant=None, + is_admin=True, + show_deleted=show_deleted) + return context + + +def get_context_from_function_and_args(function, args, kwargs): + """Find an arg of type RequestContext and return it. + + This is useful in a couple of decorators where we don't + know much about the function we're wrapping. + """ + + for arg in itertools.chain(kwargs.values(), args): + if isinstance(arg, RequestContext): + return arg + + return None diff --git a/cloudbaseinit/openstack/common/eventlet_backdoor.py b/cloudbaseinit/openstack/common/eventlet_backdoor.py new file mode 100644 index 00000000..7ca1bce7 --- /dev/null +++ b/cloudbaseinit/openstack/common/eventlet_backdoor.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Openstack, LLC. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
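[Editor's aside: typical usage of the RequestContext class shown just above, as a hedged sketch; the token, user and tenant values are hypothetical.]

    from cloudbaseinit.openstack.common import context

    ctxt = context.RequestContext(auth_tok='secret-token', user='admin',
                                  tenant='demo', is_admin=True)
    print ctxt.request_id          # 'req-...' is generated automatically
    print ctxt.to_dict()['user']   # -> 'admin'

    admin_ctxt = context.get_admin_context()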
+ +import gc +import pprint +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet + +from cloudbaseinit.openstack.common import cfg + +eventlet_backdoor_opts = [ + cfg.IntOpt('backdoor_port', + default=None, + help='port for eventlet backdoor to listen') +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) + + +def _dont_use_this(): + print "Don't use this, just disconnect instead" + + +def _find_objects(t): + return filter(lambda o: isinstance(o, t), gc.get_objects()) + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print i, gt + traceback.print_stack(gt.gr_frame) + print + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + } + + if CONF.backdoor_port is None: + return None + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. + def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = eventlet.listen(('localhost', CONF.backdoor_port)) + port = sock.getsockname()[1] + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/cloudbaseinit/openstack/common/excutils.py b/cloudbaseinit/openstack/common/excutils.py new file mode 100644 index 00000000..48a9faf3 --- /dev/null +++ b/cloudbaseinit/openstack/common/excutils.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. +""" + +import contextlib +import logging +import sys +import traceback + +from cloudbaseinit.openstack.common.gettextutils import _ + + +@contextlib.contextmanager +def save_and_reraise_exception(): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. 
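[Editor's aside: a minimal sketch of the usage pattern this docstring describes. Illustrative only; risky_operation() and rollback() are hypothetical helpers.]

    from cloudbaseinit.openstack.common import excutils

    def do_work():
        try:
            risky_operation()
        except Exception:
            with excutils.save_and_reraise_exception():
                rollback()    # may itself raise; the original is re-raised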
+ """ + type_, value, tb = sys.exc_info() + try: + yield + except Exception: + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(type_, value, tb)) + raise + raise type_, value, tb diff --git a/cloudbaseinit/openstack/common/fileutils.py b/cloudbaseinit/openstack/common/fileutils.py new file mode 100644 index 00000000..4746ad49 --- /dev/null +++ b/cloudbaseinit/openstack/common/fileutils.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import errno +import os + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise diff --git a/cloudbaseinit/openstack/common/gettextutils.py b/cloudbaseinit/openstack/common/gettextutils.py new file mode 100644 index 00000000..4ea4e03d --- /dev/null +++ b/cloudbaseinit/openstack/common/gettextutils.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. + +Usual usage in an openstack.common module: + + from cloudbaseinit.openstack.common.gettextutils import _ +""" + +import gettext + + +t = gettext.translation('openstack-common', 'locale', fallback=True) + + +def _(msg): + return t.ugettext(msg) diff --git a/cloudbaseinit/openstack/common/importutils.py b/cloudbaseinit/openstack/common/importutils.py new file mode 100644 index 00000000..2a28b455 --- /dev/null +++ b/cloudbaseinit/openstack/common/importutils.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. 
+""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ValueError, AttributeError): + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """ + Import a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. + """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] diff --git a/cloudbaseinit/openstack/common/iniparser.py b/cloudbaseinit/openstack/common/iniparser.py new file mode 100644 index 00000000..24128444 --- /dev/null +++ b/cloudbaseinit/openstack/common/iniparser.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +class ParseError(Exception): + def __init__(self, message, lineno, line): + self.msg = message + self.line = line + self.lineno = lineno + + def __str__(self): + return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line) + + +class BaseParser(object): + lineno = 0 + parse_exc = ParseError + + def _assignment(self, key, value): + self.assignment(key, value) + return None, [] + + def _get_section(self, line): + if line[-1] != ']': + return self.error_no_section_end_bracket(line) + if len(line) <= 2: + return self.error_no_section_name(line) + + return line[1:-1] + + def _split_key_value(self, line): + colon = line.find(':') + equal = line.find('=') + if colon < 0 and equal < 0: + return self.error_invalid_assignment(line) + + if colon < 0 or (equal >= 0 and equal < colon): + key, value = line[:equal], line[equal + 1:] + else: + key, value = line[:colon], line[colon + 1:] + + value = value.strip() + if ((value and value[0] == value[-1]) and + (value[0] == "\"" or value[0] == "'")): + value = value[1:-1] + return key.strip(), [value] + + def parse(self, lineiter): + key = None + value = [] + + for line in lineiter: + self.lineno += 1 + + line = line.rstrip() + if not line: + # Blank line, ends multi-line values + if key: + key, value = self._assignment(key, value) + continue + elif line[0] in (' ', '\t'): + # Continuation of previous assignment + if key is None: + self.error_unexpected_continuation(line) + else: + value.append(line.lstrip()) + continue + + if key: + # Flush previous assignment, if any + key, value = self._assignment(key, value) + + if line[0] == '[': + # Section start + section = self._get_section(line) + if section: + self.new_section(section) + elif line[0] in '#;': + self.comment(line[1:].lstrip()) + else: + key, value = self._split_key_value(line) + if not key: + return self.error_empty_key(line) + + if key: + # Flush previous assignment, if any + self._assignment(key, value) + + def assignment(self, key, value): + """Called when a full assignment is parsed""" + raise NotImplementedError() + + def new_section(self, section): + """Called when a new section is started""" + raise NotImplementedError() + + def comment(self, comment): + """Called when a comment is parsed""" + pass + + def error_invalid_assignment(self, line): + raise self.parse_exc("No ':' or '=' found in assignment", + self.lineno, line) + + def error_empty_key(self, line): + raise self.parse_exc('Key cannot be empty', self.lineno, line) + + def error_unexpected_continuation(self, line): + raise self.parse_exc('Unexpected continuation line', + self.lineno, line) + + def error_no_section_end_bracket(self, line): + raise self.parse_exc('Invalid section (must end with ])', + self.lineno, line) + + def error_no_section_name(self, line): + raise self.parse_exc('Empty section name', self.lineno, line) diff --git a/cloudbaseinit/openstack/common/jsonutils.py b/cloudbaseinit/openstack/common/jsonutils.py new file mode 100644 index 00000000..aa1735db --- /dev/null +++ b/cloudbaseinit/openstack/common/jsonutils.py @@ -0,0 +1,148 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. +''' + + +import datetime +import inspect +import itertools +import json +import xmlrpclib + +from cloudbaseinit.openstack.common import timeutils + + +def to_primitive(value, convert_instances=False, level=0): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + for test in nasty: + if test(value): + return unicode(value) + + # value of itertools.count doesn't get caught by inspects + # above and results in infinite loop when list(value) is called. + if type(value) == itertools.count: + return unicode(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. + if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > 3: + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... + try: + # It's not clear why xmlrpclib created their own DateTime type, but + # for our purposes, make it a datetime type which is explicitly + # handled + if isinstance(value, xmlrpclib.DateTime): + value = datetime.datetime(*tuple(value.timetuple())[:6]) + + if isinstance(value, (list, tuple)): + o = [] + for v in value: + o.append(to_primitive(v, convert_instances=convert_instances, + level=level)) + return o + elif isinstance(value, dict): + o = {} + for k, v in value.iteritems(): + o[k] = to_primitive(v, convert_instances=convert_instances, + level=level) + return o + elif isinstance(value, datetime.datetime): + return timeutils.strtime(value) + elif hasattr(value, 'iteritems'): + return to_primitive(dict(value.iteritems()), + convert_instances=convert_instances, + level=level + 1) + elif hasattr(value, '__iter__'): + return to_primitive(list(value), + convert_instances=convert_instances, + level=level) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. 
+ return to_primitive(value.__dict__, + convert_instances=convert_instances, + level=level + 1) + else: + return value + except TypeError: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return unicode(value) + + +def dumps(value, default=to_primitive, **kwargs): + return json.dumps(value, default=default, **kwargs) + + +def loads(s): + return json.loads(s) + + +def load(s): + return json.load(s) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append((__name__, 'dumps', TypeError, + 'loads', ValueError, 'load')) + anyjson.force_implementation(__name__) diff --git a/cloudbaseinit/openstack/common/local.py b/cloudbaseinit/openstack/common/local.py new file mode 100644 index 00000000..19d96273 --- /dev/null +++ b/cloudbaseinit/openstack/common/local.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Greenthread local storage of variables using weak references""" + +import weakref + +from eventlet import corolocal + + +class WeakLocal(corolocal.local): + def __getattribute__(self, attr): + rval = corolocal.local.__getattribute__(self, attr) + if rval: + rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return corolocal.local.__setattr__(self, attr, value) + + +store = WeakLocal() diff --git a/cloudbaseinit/openstack/common/lockutils.py b/cloudbaseinit/openstack/common/lockutils.py new file mode 100644 index 00000000..c2dde3a3 --- /dev/null +++ b/cloudbaseinit/openstack/common/lockutils.py @@ -0,0 +1,233 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
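[Editor's aside: a hedged sketch of the jsonutils wrappers defined above. Because dumps() passes to_primitive() as the default, non-JSON-native values such as datetimes and sets are converted instead of raising TypeError.]

    import datetime

    from cloudbaseinit.openstack.common import jsonutils

    data = {'when': datetime.datetime(2012, 12, 1, 23, 50, 15),
            'tags': set(['a', 'b'])}

    # datetime goes through timeutils.strtime(); the set becomes a list.
    print jsonutils.dumps(data)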
+
+
+import errno
+import functools
+import os
+import shutil
+import tempfile
+import time
+import weakref
+
+from eventlet import semaphore
+
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.openstack.common.gettextutils import _
+from cloudbaseinit.openstack.common import fileutils
+from cloudbaseinit.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+util_opts = [
+    cfg.BoolOpt('disable_process_locking', default=False,
+                help='Whether to disable inter-process locks'),
+    cfg.StrOpt('lock_path',
+               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                    '../')),
+               help='Directory to use for lock files')
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(util_opts)
+
+
+class _InterProcessLock(object):
+    """Lock implementation which allows multiple locks, working around
+    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
+    not require any cleanup. Since the lock is always held on a file
+    descriptor rather than outside of the process, the lock gets dropped
+    automatically if the process crashes, even if __exit__ is not executed.
+
+    There are no guarantees regarding usage by multiple green threads in a
+    single process here. This lock works only between processes. Exclusive
+    access between local threads should be achieved using the semaphores
+    in the @synchronized decorator.
+
+    Note these locks are released when the descriptor is closed, so it's
+    not safe to close the file descriptor while another green thread holds
+    the lock. Just opening and closing the lock file can break
+    synchronisation, so lock files must be accessed only using this
+    abstraction.
+    """
+
+    def __init__(self, name):
+        self.lockfile = None
+        self.fname = name
+
+    def __enter__(self):
+        self.lockfile = open(self.fname, 'w')
+
+        while True:
+            try:
+                # Using non-blocking locks since green threads are not
+                # patched to deal with blocking locking calls.
+                # Also upon reading the MSDN docs for locking(), it seems
+                # to have a laughable 10 attempts "blocking" mechanism.
+                self.trylock()
+                return self
+            except IOError, e:
+                if e.errno in (errno.EACCES, errno.EAGAIN):
+                    # external locks synchronise things like iptables
+                    # updates - give it some time to prevent busy spinning
+                    time.sleep(0.01)
+                else:
+                    raise
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            self.unlock()
+            self.lockfile.close()
+        except IOError:
+            LOG.exception(_("Could not release the acquired lock `%s`"),
+                          self.fname)
+
+    def trylock(self):
+        raise NotImplementedError()
+
+    def unlock(self):
+        raise NotImplementedError()
+
+
+class _WindowsLock(_InterProcessLock):
+    def trylock(self):
+        msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
+
+    def unlock(self):
+        msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
+
+
+class _PosixLock(_InterProcessLock):
+    def trylock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+    def unlock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+
+
+if os.name == 'nt':
+    import msvcrt
+    InterProcessLock = _WindowsLock
+else:
+    import fcntl
+    InterProcessLock = _PosixLock
+
+_semaphores = weakref.WeakValueDictionary()
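[Editor's aside: a sketch of direct InterProcessLock usage per the docstring above. Illustrative only; the lock file path and do_exclusive_work() are hypothetical, and most callers should prefer the @synchronized decorator defined next.]

    lock = InterProcessLock('/tmp/example.lock')
    with lock:
        # Exclusive across processes; the file-descriptor lock is
        # dropped automatically if this process crashes.
        do_exclusive_work()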
+
+
+def synchronized(name, lock_file_prefix, external=False, lock_path=None):
+    """Synchronization decorator.
+
+    Decorating a method like so::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+           ...
+
+    ensures that only one thread will execute the foo method at a time.
+
+    Different methods can share the same lock::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+           ...
+
+        @synchronized('mylock')
+        def bar(self, *args):
+           ...
+
+    This way only one of either foo or bar can be executing at a time.
+
+    The lock_file_prefix argument is used to provide lock files on disk
+    with a meaningful prefix. The prefix should end with a hyphen ('-')
+    if specified.
+
+    The external keyword argument denotes whether this lock should work
+    across multiple processes. This means that if two different workers
+    both run a method decorated with @synchronized('mylock',
+    external=True), only one of them will execute at a time.
+
+    The lock_path keyword argument is used to specify a special location
+    for external lock files to live. If nothing is set, then
+    CONF.lock_path is used as a default.
+    """
+
+    def wrap(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            # NOTE(soren): If we ever go natively threaded, this will be racy.
+            #              See http://stackoverflow.com/questions/5390569/dyn
+            #              amically-allocating-and-destroying-mutexes
+            sem = _semaphores.get(name, semaphore.Semaphore())
+            if name not in _semaphores:
+                # this check is not racy - we're already holding ref locally
+                # so GC won't remove the item and there was no IO switch
+                # (only valid in greenthreads)
+                _semaphores[name] = sem
+
+            with sem:
+                LOG.debug(_('Got semaphore "%(lock)s" for method '
+                            '"%(method)s"...'), {'lock': name,
+                                                 'method': f.__name__})
+                if external and not CONF.disable_process_locking:
+                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
+                                'method "%(method)s"...'),
+                              {'lock': name, 'method': f.__name__})
+                    cleanup_dir = False
+
+                    # We need a copy of lock_path because it is non-local
+                    local_lock_path = lock_path
+                    if not local_lock_path:
+                        local_lock_path = CONF.lock_path
+
+                    if not local_lock_path:
+                        cleanup_dir = True
+                        local_lock_path = tempfile.mkdtemp()
+
+                    if not os.path.exists(local_lock_path):
+                        cleanup_dir = True
+                        fileutils.ensure_tree(local_lock_path)
+
+                    # NOTE(mikal): the lock name cannot contain directory
+                    # separators
+                    safe_name = name.replace(os.sep, '_')
+                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
+                    lock_file_path = os.path.join(local_lock_path,
+                                                  lock_file_name)
+
+                    try:
+                        lock = InterProcessLock(lock_file_path)
+                        with lock:
+                            LOG.debug(_('Got file lock "%(lock)s" at '
+                                        '%(path)s for method '
+                                        '"%(method)s"...'),
+                                      {'lock': name,
+                                       'path': lock_file_path,
+                                       'method': f.__name__})
+                            retval = f(*args, **kwargs)
+                    finally:
+                        # NOTE(vish): This removes the tempdir if we needed
+                        #             to create one. This is used to cleanup
+                        #             the locks left behind by unit tests.
+                        if cleanup_dir:
+                            shutil.rmtree(local_lock_path)
+                else:
+                    retval = f(*args, **kwargs)
+
+            return retval
+        return inner
+    return wrap
diff --git a/cloudbaseinit/openstack/common/log.py b/cloudbaseinit/openstack/common/log.py
new file mode 100644
index 00000000..2a0a2da8
--- /dev/null
+++ b/cloudbaseinit/openstack/common/log.py
@@ -0,0 +1,470 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Openstack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. + +""" + +import cStringIO +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import stat +import sys +import traceback + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import jsonutils +from cloudbaseinit.openstack.common import local +from cloudbaseinit.openstack.common import notifier + + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s %(levelname)s %(name)s [%(request_id)s ' + '%(user)s %(tenant)s] %(instance)s' + '%(message)s', + help='format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s %(process)d %(levelname)s %(name)s [-]' + ' %(instance)s%(message)s', + help='format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s %(process)d TRACE %(name)s %(instance)s', + help='prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqplib=WARN', + 'sqlalchemy=WARN', + 'boto=WARN', + 'suds=INFO', + 'keystone=INFO', + 'eventlet.wsgi.server=WARN' + ], + help='list of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='make deprecations fatal'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. 
+ cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), +] + + +generic_log_opts = [ + cfg.StrOpt('logdir', + default=None, + help='Log output to a per-service log file in named directory'), + cfg.StrOpt('logfile', + default=None, + help='Log output to a named file'), + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), +] + + +CONF = cfg.CONF +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file or CONF.logfile + logdir = CONF.log_dir or CONF.logdir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + +class ContextAdapter(logging.LoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + else: + instance_uuid = kwargs.pop('instance_uuid', None) + if instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra.update({'instance': instance_extra}) + + extra.update({"project": self.project}) + extra.update({"version": self.version}) + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [itertools.ifilter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + if ('cloudbaseinit.openstack.common.notifier.log_notifier' in + CONF.notification_driver): + return + notifier.api.notify(None, 'error.publisher', + 'error_notification', + notifier.api.ERROR, + dict(error=record.msg)) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(type, value, tb): + extra = {} + if CONF.verbose: + extra['exc_info'] = (type, value, tb) + getLogger(product_name).critical(str(value), **extra) + return logging_excepthook + + +def setup(product_name): + """Setup logging.""" + sys.excepthook = _create_logging_excepthook(product_name) + + if CONF.log_config: + try: + logging.config.fileConfig(CONF.log_config) + except Exception: + traceback.print_exc() + raise + else: + _setup_logging_from_conf(product_name) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +def _setup_logging_from_conf(product_name): + log_root = getLogger(product_name).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = _find_facility_from_conf() + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + mode = int(CONF.logfile_mode, 8) + st = os.stat(logpath) + if st.st_mode != (stat.S_IFREG | mode): + os.chmod(logpath, mode) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not CONF.log_file: + # 
pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + log_root.addHandler(PublishErrorsHandler(logging.ERROR)) + + for handler in log_root.handlers: + datefmt = CONF.log_date_format + if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + handler.setFormatter(LegacyFormatter(datefmt=datefmt)) + + if CONF.verbose or CONF.debug: + log_root.setLevel(logging.DEBUG) + else: + log_root.setLevel(logging.INFO) + + level = logging.NOTSET + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + level = logging.getLevelName(level_name) + logger = logging.getLogger(mod) + logger.setLevel(level) + for handler in log_root.handlers: + logger.addHandler(handler) + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) + + +class LegacyFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. + + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(sdague): default the fancier formating params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id', None): + self._fmt = CONF.logging_context_format_string + else: + self._fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + self._fmt += " " + CONF.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formated copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = cStringIO.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + 
+        logging.WARN: '\033[01;33m',  # BOLD YELLOW
+        logging.ERROR: '\033[01;31m',  # BOLD RED
+        logging.CRITICAL: '\033[01;31m',  # BOLD RED
+    }
+
+    def format(self, record):
+        record.color = self.LEVEL_COLORS[record.levelno]
+        return logging.StreamHandler.format(self, record)
+
+
+class DeprecatedConfig(Exception):
+    message = _("Fatal call to deprecated config: %(msg)s")
+
+    def __init__(self, msg):
+        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
diff --git a/cloudbaseinit/openstack/common/network_utils.py b/cloudbaseinit/openstack/common/network_utils.py
new file mode 100644
index 00000000..69f67321
--- /dev/null
+++ b/cloudbaseinit/openstack/common/network_utils.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Network-related utilities and helper functions.
+"""
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+def parse_host_port(address, default_port=None):
+    """
+    Interpret a string as a host:port pair.
+    An IPv6 address MUST be escaped if accompanied by a port,
+    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
+    means both [2001:db8:85a3::8a2e:370:7334] and
+    [2001:db8:85a3::8a2e:370]:7334.
+
+    >>> parse_host_port('server01:80')
+    ('server01', 80)
+    >>> parse_host_port('server01')
+    ('server01', None)
+    >>> parse_host_port('server01', default_port=1234)
+    ('server01', 1234)
+    >>> parse_host_port('[::1]:80')
+    ('::1', 80)
+    >>> parse_host_port('[::1]')
+    ('::1', None)
+    >>> parse_host_port('[::1]', default_port=1234)
+    ('::1', 1234)
+    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
+    ('2001:db8:85a3::8a2e:370:7334', 1234)
+
+    """
+    if address[0] == '[':
+        # Escaped ipv6
+        _host, _port = address[1:].split(']')
+        host = _host
+        if ':' in _port:
+            port = _port.split(':')[1]
+        else:
+            port = default_port
+    else:
+        if address.count(':') == 1:
+            host, port = address.split(':')
+        else:
+            # 0 means ipv4, >1 means ipv6.
+            # We prohibit unescaped ipv6 addresses with port.
+            host = address
+            port = default_port
+
+    return (host, None if port is None else int(port))
diff --git a/cloudbaseinit/openstack/common/notifier/__init__.py b/cloudbaseinit/openstack/common/notifier/__init__.py
new file mode 100644
index 00000000..482d54e4
--- /dev/null
+++ b/cloudbaseinit/openstack/common/notifier/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
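For illustration only (an editorial sketch, not part of this commit): the
notifier drivers introduced below all expose the same module-level contract,
a single notify(context, message) callable. A minimal custom driver that
could be loaded through the notification_driver option might look like
this, assuming the message dict built by the api module that follows:

    def notify(_context, message):
        """Print each notification's priority and event type (sketch)."""
        print('%(priority)s: %(event_type)s' % message)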
diff --git a/cloudbaseinit/openstack/common/notifier/api.py b/cloudbaseinit/openstack/common/notifier/api.py new file mode 100644 index 00000000..1e89698a --- /dev/null +++ b/cloudbaseinit/openstack/common/notifier/api.py @@ -0,0 +1,182 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common import context +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import importutils +from cloudbaseinit.openstack.common import jsonutils +from cloudbaseinit.openstack.common import log as logging +from cloudbaseinit.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + +notifier_opts = [ + cfg.MultiStrOpt('notification_driver', + default=[], + deprecated_name='list_notifier_drivers', + help='Driver or drivers to handle sending notifications'), + cfg.StrOpt('default_notification_level', + default='INFO', + help='Default notification level for outgoing notifications'), + cfg.StrOpt('default_publisher_id', + default='$host', + help='Default publisher_id for outgoing notifications'), +] + +CONF = cfg.CONF +CONF.register_opts(notifier_opts) + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + + +class BadPriorityException(Exception): + pass + + +def notify_decorator(name, fn): + """ decorator for notify which is used from utils.monkey_patch() + + :param name: name of the function + :param function: - object of the function + :returns: function -- decorated function + + """ + def wrapped_func(*args, **kwarg): + body = {} + body['args'] = [] + body['kwarg'] = {} + for arg in args: + body['args'].append(arg) + for key in kwarg: + body['kwarg'][key] = kwarg[key] + + ctxt = context.get_context_from_function_and_args(fn, args, kwarg) + notify(ctxt, + CONF.default_publisher_id, + name, + CONF.default_notification_level, + body) + return fn(*args, **kwarg) + return wrapped_func + + +def publisher_id(service, host=None): + if not host: + host = CONF.host + return "%s.%s" % (service, host) + + +def notify(context, publisher_id, event_type, priority, payload): + """Sends a notification using the specified driver + + :param publisher_id: the source worker_type.host of the message + :param event_type: the literal type of event (ex. Instance Creation) + :param priority: patterned after the enumeration of Python logging + levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + :param payload: A python dictionary of attributes + + Outgoing message format includes the above parameters, and appends the + following: + + message_id + a UUID representing the id for this notification + + timestamp + the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above + attributes, which will then be sent via the transport mechanism defined + by the driver. 
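+
+    The message_id and timestamp fields are generated by this function;
+    callers supply only the remaining attributes.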
+ + Message example:: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': timeutils.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... }} + + """ + if priority not in log_levels: + raise BadPriorityException( + _('%s not in valid priorities') % priority) + + # Ensure everything is JSON serializable. + payload = jsonutils.to_primitive(payload, convert_instances=True) + + msg = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + timestamp=str(timeutils.utcnow())) + + for driver in _get_drivers(): + try: + driver.notify(context, msg) + except Exception as e: + LOG.exception(_("Problem '%(e)s' attempting to " + "send to notification system. " + "Payload=%(payload)s") + % dict(e=e, payload=payload)) + + +_drivers = None + + +def _get_drivers(): + """Instantiate, cache, and return drivers based on the CONF.""" + global _drivers + if _drivers is None: + _drivers = {} + for notification_driver in CONF.notification_driver: + add_driver(notification_driver) + + return _drivers.values() + + +def add_driver(notification_driver): + """Add a notification driver at runtime.""" + # Make sure the driver list is initialized. + _get_drivers() + if isinstance(notification_driver, basestring): + # Load and add + try: + driver = importutils.import_module(notification_driver) + _drivers[notification_driver] = driver + except ImportError: + LOG.exception(_("Failed to load notifier %s. " + "These notifications will not be sent.") % + notification_driver) + else: + # Driver is already loaded; just add the object. + _drivers[notification_driver] = notification_driver + + +def _reset_drivers(): + """Used by unit tests to reset the drivers.""" + global _drivers + _drivers = None diff --git a/cloudbaseinit/openstack/common/notifier/log_notifier.py b/cloudbaseinit/openstack/common/notifier/log_notifier.py new file mode 100644 index 00000000..cb5b3864 --- /dev/null +++ b/cloudbaseinit/openstack/common/notifier/log_notifier.py @@ -0,0 +1,35 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common import jsonutils +from cloudbaseinit.openstack.common import log as logging + + +CONF = cfg.CONF + + +def notify(_context, message): + """Notifies the recipient of the desired event given the model. 
+ Log notifications using openstack's default logging system""" + + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'cloudbaseinit.openstack.common.notification.%s' % + message['event_type']) + getattr(logger, priority)(jsonutils.dumps(message)) diff --git a/cloudbaseinit/openstack/common/notifier/no_op_notifier.py b/cloudbaseinit/openstack/common/notifier/no_op_notifier.py new file mode 100644 index 00000000..ee1ddbdc --- /dev/null +++ b/cloudbaseinit/openstack/common/notifier/no_op_notifier.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def notify(_context, message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/cloudbaseinit/openstack/common/notifier/rabbit_notifier.py b/cloudbaseinit/openstack/common/notifier/rabbit_notifier.py new file mode 100644 index 00000000..78f1f675 --- /dev/null +++ b/cloudbaseinit/openstack/common/notifier/rabbit_notifier.py @@ -0,0 +1,29 @@ +# Copyright 2012 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import log as logging +from cloudbaseinit.openstack.common.notifier import rpc_notifier + +LOG = logging.getLogger(__name__) + + +def notify(context, message): + """Deprecated in Grizzly. Please use rpc_notifier instead.""" + + LOG.deprecated(_("The rabbit_notifier is now deprecated." + " Please use rpc_notifier instead.")) + rpc_notifier.notify(context, message) diff --git a/cloudbaseinit/openstack/common/notifier/rpc_notifier.py b/cloudbaseinit/openstack/common/notifier/rpc_notifier.py new file mode 100644 index 00000000..8f4b30a0 --- /dev/null +++ b/cloudbaseinit/openstack/common/notifier/rpc_notifier.py @@ -0,0 +1,46 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
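+
+# NOTE: with the default notification_topics value below, a message of
+# priority 'error' is cast once per list entry, to the AMQP topic
+# 'notifications.error'.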
+ + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common import context as req_context +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import log as logging +from cloudbaseinit.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'notification_topics', default=['notifications', ], + help='AMQP topic used for openstack notifications') + +CONF = cfg.CONF +CONF.register_opt(notification_topic_opt) + + +def notify(context, message): + """Sends a notification via RPC""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.notification_topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/cloudbaseinit/openstack/common/notifier/test_notifier.py b/cloudbaseinit/openstack/common/notifier/test_notifier.py new file mode 100644 index 00000000..5e348803 --- /dev/null +++ b/cloudbaseinit/openstack/common/notifier/test_notifier.py @@ -0,0 +1,22 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +NOTIFICATIONS = [] + + +def notify(_context, message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/cloudbaseinit/openstack/common/plugin/__init__.py b/cloudbaseinit/openstack/common/plugin/__init__.py new file mode 100644 index 00000000..63c3905e --- /dev/null +++ b/cloudbaseinit/openstack/common/plugin/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cloudbaseinit/openstack/common/plugin/callbackplugin.py b/cloudbaseinit/openstack/common/plugin/callbackplugin.py new file mode 100644 index 00000000..1d414bbe --- /dev/null +++ b/cloudbaseinit/openstack/common/plugin/callbackplugin.py @@ -0,0 +1,93 @@ +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cloudbaseinit.openstack.common import log as logging
+from cloudbaseinit.openstack.common.plugin import plugin
+
+
+LOG = logging.getLogger(__name__)
+
+
+class _CallbackNotifier(object):
+    """Manages plugin-defined notification callbacks.
+
+    For each Plugin, a CallbackNotifier will be added to the
+    notification driver list.  Calls to notify() with appropriate
+    messages will be hooked and prompt callbacks.
+
+    A callback should look like this:
+      def callback(context, message, user_data)
+    """
+
+    def __init__(self):
+        self._callback_dict = {}
+
+    def _add_callback(self, event_type, callback, user_data):
+        callback_list = self._callback_dict.get(event_type, [])
+        callback_list.append({'function': callback,
+                              'user_data': user_data})
+        self._callback_dict[event_type] = callback_list
+
+    def _remove_callback(self, callback):
+        for callback_list in self._callback_dict.values():
+            for entry in callback_list:
+                if entry['function'] == callback:
+                    callback_list.remove(entry)
+
+    def notify(self, context, message):
+        if message.get('event_type') not in self._callback_dict:
+            return
+
+        for entry in self._callback_dict[message.get('event_type')]:
+            entry['function'](context, message, entry.get('user_data'))
+
+    def callbacks(self):
+        return self._callback_dict
+
+
+class CallbackPlugin(plugin.Plugin):
+    """Plugin with a simple callback interface.
+
+    This class is provided as a convenience for producing a simple
+    plugin that only watches a couple of events.  For example, here's
+    a subclass which prints a line the first time an instance is created.
+
+    class HookInstanceCreation(CallbackPlugin):
+
+        def __init__(self, _service_name):
+            super(HookInstanceCreation, self).__init__(_service_name)
+            self._add_callback(self.magic, 'compute.instance.create.start')
+
+        def magic(self, context, message, user_data):
+            print "An instance was created!"
+            self._remove_callback(self.magic)
+    """
+
+    def __init__(self, service_name):
+        super(CallbackPlugin, self).__init__(service_name)
+        self._callback_notifier = _CallbackNotifier()
+        self._add_notifier(self._callback_notifier)
+
+    def _add_callback(self, callback, event_type, user_data=None):
+        """Add callback for a given event notification.
+
+        Subclasses can call this as an alternative to implementing
+        a full-blown notify notifier.
+        """
+        self._callback_notifier._add_callback(event_type, callback, user_data)
+
+    def _remove_callback(self, callback):
+        """Remove all notification callbacks to specified function."""
+        self._callback_notifier._remove_callback(callback)
diff --git a/cloudbaseinit/openstack/common/plugin/plugin.py b/cloudbaseinit/openstack/common/plugin/plugin.py
new file mode 100644
index 00000000..27920df0
--- /dev/null
+++ b/cloudbaseinit/openstack/common/plugin/plugin.py
@@ -0,0 +1,86 @@
+# Copyright 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cloudbaseinit.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class Plugin(object): + """Defines an interface for adding functionality to an OpenStack service. + + A plugin interacts with a service via the following pathways: + + - An optional set of notifiers, managed by calling add_notifier() + or by overriding _notifiers() + + - A set of api extensions, managed via add_api_extension_descriptor() + + - Direct calls to service functions. + + - Whatever else the plugin wants to do on its own. + + This is the reference implementation. + """ + + # The following functions are provided as convenience methods + # for subclasses. Subclasses should call them but probably not + # override them. + def _add_api_extension_descriptor(self, descriptor): + """Subclass convenience method which adds an extension descriptor. + + Subclass constructors should call this method when + extending a project's REST interface. + + Note that once the api service has loaded, the + API extension set is more-or-less fixed, so + this should mainly be called by subclass constructors. + """ + self._api_extension_descriptors.append(descriptor) + + def _add_notifier(self, notifier): + """Subclass convenience method which adds a notifier. + + Notifier objects should implement the function notify(message). + Each notifier receives a notify() call whenever an openstack + service broadcasts a notification. + + Best to call this during construction. Notifiers are enumerated + and registered by the pluginmanager at plugin load time. + """ + self._notifiers.append(notifier) + + # The following methods are called by OpenStack services to query + # plugin features. Subclasses should probably not override these. + def _notifiers(self): + """Returns list of notifiers for this plugin.""" + return self._notifiers + + notifiers = property(_notifiers) + + def _api_extension_descriptors(self): + """Return a list of API extension descriptors. + + Called by a project API during its load sequence. + """ + return self._api_extension_descriptors + + api_extension_descriptors = property(_api_extension_descriptors) + + # Most plugins will override this: + def __init__(self, service_name): + self._notifiers = [] + self._api_extension_descriptors = [] diff --git a/cloudbaseinit/openstack/common/plugin/pluginmanager.py b/cloudbaseinit/openstack/common/plugin/pluginmanager.py new file mode 100644 index 00000000..92cb53b4 --- /dev/null +++ b/cloudbaseinit/openstack/common/plugin/pluginmanager.py @@ -0,0 +1,77 @@ +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import pkg_resources + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import log as logging +from cloudbaseinit.openstack.common.notifier import api as notifier_api + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class PluginManager(object): + """Manages plugin entrypoints and loading. + + For a service to implement this plugin interface for callback purposes: + + - Make use of the openstack-common notifier system + - Instantiate this manager in each process (passing in + project and service name) + + For an API service to extend itself using this plugin interface, + it needs to query the plugin_extension_factory provided by + the already-instantiated PluginManager. + """ + + def __init__(self, project_name, service_name): + """ Construct Plugin Manager; load and initialize plugins. + + project_name (e.g. 'nova' or 'glance') is used + to construct the entry point that identifies plugins. + + The service_name (e.g. 'compute') is passed on to + each plugin as a raw string for it to do what it will. + """ + self._project_name = project_name + self._service_name = service_name + self.plugins = [] + + def load_plugins(self): + self.plugins = [] + + for entrypoint in pkg_resources.iter_entry_points('%s.plugin' % + self._project_name): + try: + pluginclass = entrypoint.load() + plugin = pluginclass(self._service_name) + self.plugins.append(plugin) + except Exception, exc: + LOG.error(_("Failed to load plugin %(plug)s: %(exc)s") % + {'plug': entrypoint, 'exc': exc}) + + # Register individual notifiers. + for plugin in self.plugins: + for notifier in plugin.notifiers: + notifier_api.add_driver(notifier) + + def plugin_extension_factory(self, ext_mgr): + for plugin in self.plugins: + descriptors = plugin.api_extension_descriptors + for descriptor in descriptors: + ext_mgr.load_extension(descriptor) diff --git a/cloudbaseinit/openstack/common/policy.py b/cloudbaseinit/openstack/common/policy.py new file mode 100644 index 00000000..8b463ec3 --- /dev/null +++ b/cloudbaseinit/openstack/common/policy.py @@ -0,0 +1,779 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common Policy Engine Implementation + +Policies can be expressed in one of two forms: A list of lists, or a +string written in the new policy language. + +In the list-of-lists representation, each check inside the innermost +list is combined as with an "and" conjunction--for that check to pass, +all the specified checks must pass. These innermost lists are then +combined as with an "or" conjunction. This is the original way of +expressing policies, but there now exists a new way: the policy +language. 
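+
+Both representations are parsed into the same tree of Check objects;
+parse_rule(), defined below, simply picks a parser based on whether the
+rule is given as a string or as a list.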
+ +In the policy language, each check is specified the same way as in the +list-of-lists representation: a simple "a:b" pair that is matched to +the correct code to perform that check. However, conjunction +operators are available, allowing for more expressiveness in crafting +policies. + +As an example, take the following rule, expressed in the list-of-lists +representation:: + + [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] + +In the policy language, this becomes:: + + role:admin or (project_id:%(project_id)s and role:projectadmin) + +The policy language also has the "not" operator, allowing a richer +policy rule:: + + project_id:%(project_id)s and not role:dunce + +Finally, two special policy checks should be mentioned; the policy +check "@" will always accept an access, and the policy check "!" will +always reject an access. (Note that if a rule is either the empty +list ("[]") or the empty string, this is equivalent to the "@" policy +check.) Of these, the "!" policy check is probably the most useful, +as it allows particular rules to be explicitly disabled. +""" + +import abc +import logging +import re +import urllib + +import urllib2 + +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import jsonutils + + +LOG = logging.getLogger(__name__) + + +_rules = None +_checks = {} + + +class Rules(dict): + """ + A store for rules. Handles the default_rule setting directly. + """ + + @classmethod + def load_json(cls, data, default_rule=None): + """ + Allow loading of JSON rule data. + """ + + # Suck in the JSON data and parse the rules + rules = dict((k, parse_rule(v)) for k, v in + jsonutils.loads(data).items()) + + return cls(rules, default_rule) + + def __init__(self, rules=None, default_rule=None): + """Initialize the Rules store.""" + + super(Rules, self).__init__(rules or {}) + self.default_rule = default_rule + + def __missing__(self, key): + """Implements the default rule handling.""" + + # If the default rule isn't actually defined, do something + # reasonably intelligent + if not self.default_rule or self.default_rule not in self: + raise KeyError(key) + + return self[self.default_rule] + + def __str__(self): + """Dumps a string representation of the rules.""" + + # Start by building the canonical strings for the rules + out_rules = {} + for key, value in self.items(): + # Use empty string for singleton TrueCheck instances + if isinstance(value, TrueCheck): + out_rules[key] = '' + else: + out_rules[key] = str(value) + + # Dump a pretty-printed JSON representation + return jsonutils.dumps(out_rules, indent=4) + + +# Really have to figure out a way to deprecate this +def set_rules(rules): + """Set the rules in use for policy checks.""" + + global _rules + + _rules = rules + + +# Ditto +def reset(): + """Clear the rules used for policy checks.""" + + global _rules + + _rules = None + + +def check(rule, target, creds, exc=None, *args, **kwargs): + """ + Checks authorization of a rule against the target and credentials. + + :param rule: The rule to evaluate. + :param target: As much information about the object being operated + on as possible, as a dictionary. + :param creds: As much information about the user performing the + action as possible, as a dictionary. + :param exc: Class of the exception to raise if the check fails. + Any remaining arguments passed to check() (both + positional and keyword arguments) will be passed to + the exception class. If exc is not provided, returns + False. 
+
+    :return: Returns False if the policy does not allow the action and
+             exc is not provided; otherwise, returns a value that
+             evaluates to True.  Note: for rules using the "case"
+             expression, this True value will be the specified string
+             from the expression.
+    """
+
+    # Allow the rule to be a Check tree
+    if isinstance(rule, BaseCheck):
+        result = rule(target, creds)
+    elif not _rules:
+        # No rules to reference means we're going to fail closed
+        result = False
+    else:
+        try:
+            # Evaluate the rule
+            result = _rules[rule](target, creds)
+        except KeyError:
+            # If the rule doesn't exist, fail closed
+            result = False
+
+    # If it is False, raise the exception if requested
+    if exc and result is False:
+        raise exc(*args, **kwargs)
+
+    return result
+
+
+class BaseCheck(object):
+    """
+    Abstract base class for Check classes.
+    """
+
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def __str__(self):
+        """
+        Retrieve a string representation of the Check tree rooted at
+        this node.
+        """
+
+        pass
+
+    @abc.abstractmethod
+    def __call__(self, target, cred):
+        """
+        Perform the check.  Returns False to reject the access or a
+        true value (not necessarily True) to accept the access.
+        """
+
+        pass
+
+
+class FalseCheck(BaseCheck):
+    """
+    A policy check that always returns False (disallow).
+    """
+
+    def __str__(self):
+        """Return a string representation of this check."""
+
+        return "!"
+
+    def __call__(self, target, cred):
+        """Check the policy."""
+
+        return False
+
+
+class TrueCheck(BaseCheck):
+    """
+    A policy check that always returns True (allow).
+    """
+
+    def __str__(self):
+        """Return a string representation of this check."""
+
+        return "@"
+
+    def __call__(self, target, cred):
+        """Check the policy."""
+
+        return True
+
+
+class Check(BaseCheck):
+    """
+    A base class to allow for user-defined policy checks.
+    """
+
+    def __init__(self, kind, match):
+        """
+        :param kind: The kind of the check, i.e., the field before the
+                     ':'.
+        :param match: The match of the check, i.e., the field after
+                      the ':'.
+        """
+
+        self.kind = kind
+        self.match = match
+
+    def __str__(self):
+        """Return a string representation of this check."""
+
+        return "%s:%s" % (self.kind, self.match)
+
+
+class NotCheck(BaseCheck):
+    """
+    A policy check that inverts the result of another policy check.
+    Implements the "not" operator.
+    """
+
+    def __init__(self, rule):
+        """
+        Initialize the 'not' check.
+
+        :param rule: The rule to negate.  Must be a Check.
+        """
+
+        self.rule = rule
+
+    def __str__(self):
+        """Return a string representation of this check."""
+
+        return "not %s" % self.rule
+
+    def __call__(self, target, cred):
+        """
+        Check the policy.  Returns the logical inverse of the wrapped
+        check.
+        """
+
+        return not self.rule(target, cred)
+
+
+class AndCheck(BaseCheck):
+    """
+    A policy check that requires that a list of other checks all
+    return True.  Implements the "and" operator.
+    """
+
+    def __init__(self, rules):
+        """
+        Initialize the 'and' check.
+
+        :param rules: A list of rules that will be tested.
+        """
+
+        self.rules = rules
+
+    def __str__(self):
+        """Return a string representation of this check."""
+
+        return "(%s)" % ' and '.join(str(r) for r in self.rules)
+
+    def __call__(self, target, cred):
+        """
+        Check the policy.  Requires that all rules accept in order to
+        return True.
+        """
+
+        for rule in self.rules:
+            if not rule(target, cred):
+                return False
+
+        return True
+
+    def add_check(self, rule):
+        """
+        Allows addition of another rule to the list of rules that will
+        be tested. 
Returns the AndCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +class OrCheck(BaseCheck): + """ + A policy check that requires that at least one of a list of other + checks returns True. Implements the "or" operator. + """ + + def __init__(self, rules): + """ + Initialize the 'or' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' or '.join(str(r) for r in self.rules) + + def __call__(self, target, cred): + """ + Check the policy. Requires that at least one rule accept in + order to return True. + """ + + for rule in self.rules: + if rule(target, cred): + return True + + return False + + def add_check(self, rule): + """ + Allows addition of another rule to the list of rules that will + be tested. Returns the OrCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +def _parse_check(rule): + """ + Parse a single base check rule into an appropriate Check object. + """ + + # Handle the special checks + if rule == '!': + return FalseCheck() + elif rule == '@': + return TrueCheck() + + try: + kind, match = rule.split(':', 1) + except Exception: + LOG.exception(_("Failed to understand rule %(rule)s") % locals()) + # If the rule is invalid, we'll fail closed + return FalseCheck() + + # Find what implements the check + if kind in _checks: + return _checks[kind](kind, match) + elif None in _checks: + return _checks[None](kind, match) + else: + LOG.error(_("No handler for matches of kind %s") % kind) + return FalseCheck() + + +def _parse_list_rule(rule): + """ + Provided for backwards compatibility. Translates the old + list-of-lists syntax into a tree of Check objects. + """ + + # Empty rule defaults to True + if not rule: + return TrueCheck() + + # Outer list is joined by "or"; inner list by "and" + or_list = [] + for inner_rule in rule: + # Elide empty inner lists + if not inner_rule: + continue + + # Handle bare strings + if isinstance(inner_rule, basestring): + inner_rule = [inner_rule] + + # Parse the inner rules into Check objects + and_list = [_parse_check(r) for r in inner_rule] + + # Append the appropriate check to the or_list + if len(and_list) == 1: + or_list.append(and_list[0]) + else: + or_list.append(AndCheck(and_list)) + + # If we have only one check, omit the "or" + if len(or_list) == 0: + return FalseCheck() + elif len(or_list) == 1: + return or_list[0] + + return OrCheck(or_list) + + +# Used for tokenizing the policy language +_tokenize_re = re.compile(r'\s+') + + +def _parse_tokenize(rule): + """ + Tokenizer for the policy language. + + Most of the single-character tokens are specified in the + _tokenize_re; however, parentheses need to be handled specially, + because they can appear inside a check string. Thankfully, those + parentheses that appear inside a check string can never occur at + the very beginning or end ("%(variable)s" is the correct syntax). 
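+
+    For example (illustrative), the rule
+    "role:admin or (role:projectadmin and project_id:%(project_id)s)"
+    is tokenized as: 'check', 'or', '(', 'check', 'and', 'check', ')'.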
+ """ + + for tok in _tokenize_re.split(rule): + # Skip empty tokens + if not tok or tok.isspace(): + continue + + # Handle leading parens on the token + clean = tok.lstrip('(') + for i in range(len(tok) - len(clean)): + yield '(', '(' + + # If it was only parentheses, continue + if not clean: + continue + else: + tok = clean + + # Handle trailing parens on the token + clean = tok.rstrip(')') + trail = len(tok) - len(clean) + + # Yield the cleaned token + lowered = clean.lower() + if lowered in ('and', 'or', 'not'): + # Special tokens + yield lowered, clean + elif clean: + # Not a special token, but not composed solely of ')' + if len(tok) >= 2 and ((tok[0], tok[-1]) in + [('"', '"'), ("'", "'")]): + # It's a quoted string + yield 'string', tok[1:-1] + else: + yield 'check', _parse_check(clean) + + # Yield the trailing parens + for i in range(trail): + yield ')', ')' + + +class ParseStateMeta(type): + """ + Metaclass for the ParseState class. Facilitates identifying + reduction methods. + """ + + def __new__(mcs, name, bases, cls_dict): + """ + Create the class. Injects the 'reducers' list, a list of + tuples matching token sequences to the names of the + corresponding reduction methods. + """ + + reducers = [] + + for key, value in cls_dict.items(): + if not hasattr(value, 'reducers'): + continue + for reduction in value.reducers: + reducers.append((reduction, key)) + + cls_dict['reducers'] = reducers + + return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) + + +def reducer(*tokens): + """ + Decorator for reduction methods. Arguments are a sequence of + tokens, in order, which should trigger running this reduction + method. + """ + + def decorator(func): + # Make sure we have a list of reducer sequences + if not hasattr(func, 'reducers'): + func.reducers = [] + + # Add the tokens to the list of reducer sequences + func.reducers.append(list(tokens)) + + return func + + return decorator + + +class ParseState(object): + """ + Implement the core of parsing the policy language. Uses a greedy + reduction algorithm to reduce a sequence of tokens into a single + terminal, the value of which will be the root of the Check tree. + + Note: error reporting is rather lacking. The best we can get with + this parser formulation is an overall "parse failed" error. + Fortunately, the policy language is simple enough that this + shouldn't be that big a problem. + """ + + __metaclass__ = ParseStateMeta + + def __init__(self): + """Initialize the ParseState.""" + + self.tokens = [] + self.values = [] + + def reduce(self): + """ + Perform a greedy reduction of the token stream. If a reducer + method matches, it will be executed, then the reduce() method + will be called recursively to search for any more possible + reductions. + """ + + for reduction, methname in self.reducers: + if (len(self.tokens) >= len(reduction) and + self.tokens[-len(reduction):] == reduction): + # Get the reduction method + meth = getattr(self, methname) + + # Reduce the token stream + results = meth(*self.values[-len(reduction):]) + + # Update the tokens and values + self.tokens[-len(reduction):] = [r[0] for r in results] + self.values[-len(reduction):] = [r[1] for r in results] + + # Check for any more reductions + return self.reduce() + + def shift(self, tok, value): + """Adds one more token to the state. Calls reduce().""" + + self.tokens.append(tok) + self.values.append(value) + + # Do a greedy reduce... + self.reduce() + + @property + def result(self): + """ + Obtain the final result of the parse. 
Raises ValueError if + the parse failed to reduce to a single result. + """ + + if len(self.values) != 1: + raise ValueError("Could not parse rule") + return self.values[0] + + @reducer('(', 'check', ')') + @reducer('(', 'and_expr', ')') + @reducer('(', 'or_expr', ')') + def _wrap_check(self, _p1, check, _p2): + """Turn parenthesized expressions into a 'check' token.""" + + return [('check', check)] + + @reducer('check', 'and', 'check') + def _make_and_expr(self, check1, _and, check2): + """ + Create an 'and_expr' from two checks joined by the 'and' + operator. + """ + + return [('and_expr', AndCheck([check1, check2]))] + + @reducer('and_expr', 'and', 'check') + def _extend_and_expr(self, and_expr, _and, check): + """ + Extend an 'and_expr' by adding one more check. + """ + + return [('and_expr', and_expr.add_check(check))] + + @reducer('check', 'or', 'check') + def _make_or_expr(self, check1, _or, check2): + """ + Create an 'or_expr' from two checks joined by the 'or' + operator. + """ + + return [('or_expr', OrCheck([check1, check2]))] + + @reducer('or_expr', 'or', 'check') + def _extend_or_expr(self, or_expr, _or, check): + """ + Extend an 'or_expr' by adding one more check. + """ + + return [('or_expr', or_expr.add_check(check))] + + @reducer('not', 'check') + def _make_not_expr(self, _not, check): + """Invert the result of another check.""" + + return [('check', NotCheck(check))] + + +def _parse_text_rule(rule): + """ + Translates a policy written in the policy language into a tree of + Check objects. + """ + + # Empty rule means always accept + if not rule: + return TrueCheck() + + # Parse the token stream + state = ParseState() + for tok, value in _parse_tokenize(rule): + state.shift(tok, value) + + try: + return state.result + except ValueError: + # Couldn't parse the rule + LOG.exception(_("Failed to understand rule %(rule)r") % locals()) + + # Fail closed + return FalseCheck() + + +def parse_rule(rule): + """ + Parses a policy rule into a tree of Check objects. + """ + + # If the rule is a string, it's in the policy language + if isinstance(rule, basestring): + return _parse_text_rule(rule) + return _parse_list_rule(rule) + + +def register(name, func=None): + """ + Register a function or Check class as a policy check. + + :param name: Gives the name of the check type, e.g., 'rule', + 'role', etc. If name is None, a default check type + will be registered. + :param func: If given, provides the function or class to register. + If not given, returns a function taking one argument + to specify the function or class to register, + allowing use as a decorator. + """ + + # Perform the actual decoration by registering the function or + # class. Returns the function or class for compliance with the + # decorator interface. + def decorator(func): + _checks[name] = func + return func + + # If the function or class is given, do the registration + if func: + return decorator(func) + + return decorator + + +@register("rule") +class RuleCheck(Check): + def __call__(self, target, creds): + """ + Recursively checks credentials based on the defined rules. 
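+
+        For example (illustrative), a policy entry such as
+        "default": "rule:admin_required" delegates the decision to the
+        registered "admin_required" rule; a reference to an unknown
+        rule fails closed and returns False.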
+ """ + + try: + return _rules[self.match](target, creds) + except KeyError: + # We don't have any matching rule; fail closed + return False + + +@register("role") +class RoleCheck(Check): + def __call__(self, target, creds): + """Check that there is a matching role in the cred dict.""" + + return self.match.lower() in [x.lower() for x in creds['roles']] + + +@register('http') +class HttpCheck(Check): + def __call__(self, target, creds): + """ + Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response + is exactly 'True'. + """ + + url = ('http:' + self.match) % target + data = {'target': jsonutils.dumps(target), + 'credentials': jsonutils.dumps(creds)} + post_data = urllib.urlencode(data) + f = urllib2.urlopen(url, post_data) + return f.read() == "True" + + +@register(None) +class GenericCheck(Check): + def __call__(self, target, creds): + """ + Check an individual match. + + Matches look like: + + tenant:%(tenant_id)s + role:compute:admin + """ + + # TODO(termie): do dict inspection via dot syntax + match = self.match % target + if self.kind in creds: + return match == unicode(creds[self.kind]) + return False diff --git a/cloudbaseinit/openstack/common/rpc/__init__.py b/cloudbaseinit/openstack/common/rpc/__init__.py new file mode 100644 index 00000000..f5700cfb --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/__init__.py @@ -0,0 +1,270 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A remote procedure call (rpc) abstraction. + +For some wrappers that add message versioning to rpc, see: + rpc.dispatcher + rpc.proxy +""" + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common import importutils + + +rpc_opts = [ + cfg.StrOpt('rpc_backend', + default='%s.impl_kombu' % __package__, + help="The messaging module to use, defaults to kombu."), + cfg.IntOpt('rpc_thread_pool_size', + default=64, + help='Size of RPC thread pool'), + cfg.IntOpt('rpc_conn_pool_size', + default=30, + help='Size of RPC connection pool'), + cfg.IntOpt('rpc_response_timeout', + default=60, + help='Seconds to wait for a response from call or multicall'), + cfg.IntOpt('rpc_cast_timeout', + default=30, + help='Seconds to wait before a cast expires (TTL). ' + 'Only supported by impl_zmq.'), + cfg.ListOpt('allowed_rpc_exception_modules', + default=['cloudbaseinit.openstack.common.exception', + 'nova.exception', + 'cinder.exception', + ], + help='Modules of exceptions that are permitted to be recreated' + 'upon receiving exception data from an rpc call.'), + cfg.BoolOpt('fake_rabbit', + default=False, + help='If passed, use a fake RabbitMQ provider'), + # + # The following options are not registered here, but are expected to be + # present. 
+    # The project using this library must register these options with
+    # the configuration so that project-specific defaults may be defined.
+    #
+    #cfg.StrOpt('control_exchange',
+    #           default='nova',
+    #           help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+]
+
+cfg.CONF.register_opts(rpc_opts)
+
+
+def create_connection(new=True):
+    """Create a connection to the message bus used for rpc.
+
+    For some example usage of creating a connection and some consumers on that
+    connection, see nova.service.
+
+    :param new: Whether or not to create a new connection.  A new connection
+                will be created by default.  If new is False, the
+                implementation is free to return an existing connection from a
+                pool.
+
+    :returns: An instance of openstack.common.rpc.common.Connection
+    """
+    return _get_impl().create_connection(cfg.CONF, new=new)
+
+
+def call(context, topic, msg, timeout=None):
+    """Invoke a remote method that returns something.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.  This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+    :param timeout: int, number of seconds to use for a response timeout.
+                    If set, this overrides the rpc_response_timeout option.
+
+    :returns: A dict from the remote method.
+
+    :raises: openstack.common.rpc.common.Timeout if a complete response
+             is not received before the timeout is reached.
+    """
+    return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
+
+
+def cast(context, topic, msg):
+    """Invoke a remote method that does not return anything.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.  This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().cast(cfg.CONF, context, topic, msg)
+
+
+def fanout_cast(context, topic, msg):
+    """Broadcast a remote method invocation with no return.
+
+    This method will get invoked on all consumers that were set up with this
+    topic name and fanout=True.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.  This correlates to the
+                  topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=True.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)
+
+
+def multicall(context, topic, msg, timeout=None):
+    """Invoke a remote method and get back an iterator.
+
+    In this case, the remote method will be returning multiple values in
+    separate messages, so the return values can be processed as they come in
+    via an iterator.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the rpc message to.
+                  This correlates to the topic argument of
+                  openstack.common.rpc.common.Connection.create_consumer()
+                  and only applies when the consumer was created with
+                  fanout=False.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+    :param timeout: int, number of seconds to use for a response timeout.
+                    If set, this overrides the rpc_response_timeout option.
+
+    :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
+              an index that starts at 0 and increases by one for each value
+              returned and X is the Nth value that was returned by the remote
+              method.
+
+    :raises: openstack.common.rpc.common.Timeout if a complete response
+             is not received before the timeout is reached.
+    """
+    return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
+
+
+def notify(context, topic, msg):
+    """Send notification event.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict with the content of the event.
+
+    :returns: None
+    """
+    return _get_impl().notify(cfg.CONF, context, topic, msg)
+
+
+def cleanup():
+    """Clean up resources in use by implementation.
+
+    Clean up any resources that have been allocated by the RPC implementation.
+    This is typically open connections to a messaging service.  This function
+    would get called before an application using this API exits to allow
+    connections to get torn down cleanly.
+
+    :returns: None
+    """
+    return _get_impl().cleanup()
+
+
+def cast_to_server(context, server_params, topic, msg):
+    """Invoke a remote method that does not return anything.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
+                                      msg)
+
+
+def fanout_cast_to_server(context, server_params, topic, msg):
+    """Broadcast a remote method invocation with no return.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                                             "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
+                                             topic, msg)
+
+
+def queue_get_for(context, topic, host):
+    """Get a queue name for a given topic + host.
+
+    This function only works if this naming convention is followed on the
+    consumer side, as well.  For example, in nova, every instance of the
+    nova-foo service calls create_consumer() for two topics:
+
+        foo
+        foo.<host>
+
+    Messages sent to the 'foo' topic are distributed to exactly one instance of
+    the nova-foo service.  The services are chosen in a round-robin fashion.
+    Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
+    <host>.
+    """
+    return '%s.%s' % (topic, host) if host else topic
+
+
+_RPCIMPL = None
+
+
+def _get_impl():
+    """Delay import of rpc_backend until configuration is loaded."""
+    global _RPCIMPL
+    if _RPCIMPL is None:
+        try:
+            _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
+        except ImportError:
+            # For backwards compatibility with older nova config.
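+            # (e.g. a legacy 'nova.rpc.impl_kombu' backend value is
+            # rewritten to 'nova.openstack.common.rpc.impl_kombu').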
+ impl = cfg.CONF.rpc_backend.replace('nova.rpc', + 'nova.openstack.common.rpc') + _RPCIMPL = importutils.import_module(impl) + return _RPCIMPL diff --git a/cloudbaseinit/openstack/common/rpc/amqp.py b/cloudbaseinit/openstack/common/rpc/amqp.py new file mode 100644 index 00000000..0efd43e2 --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/amqp.py @@ -0,0 +1,427 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 - 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Shared code between AMQP based openstack.common.rpc implementations. + +The code in this module is shared between the rpc implemenations based on AMQP. +Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses +AMQP, but is deprecated and predates this code. +""" + +import inspect +import sys +import uuid + +from eventlet import greenpool +from eventlet import pools +from eventlet import semaphore + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common import excutils +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import local +from cloudbaseinit.openstack.common import log as logging +from cloudbaseinit.openstack.common.rpc import common as rpc_common + + +LOG = logging.getLogger(__name__) + + +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + def __init__(self, conf, connection_cls, *args, **kwargs): + self.connection_cls = connection_cls + self.conf = conf + kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size) + kwargs.setdefault("order_as_stack", True) + super(Pool, self).__init__(*args, **kwargs) + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug(_('Pool creating new connection')) + return self.connection_cls(self.conf) + + def empty(self): + while self.free_items: + self.get().close() + + +_pool_create_sem = semaphore.Semaphore() + + +def get_connection_pool(conf, connection_cls): + with _pool_create_sem: + # Make sure only one thread tries to create the connection pool. + if not connection_cls.pool: + connection_cls.pool = Pool(conf, connection_cls) + return connection_cls.pool + + +class ConnectionContext(rpc_common.Connection): + """The class that is actually returned to the caller of + create_connection(). This is essentially a wrapper around + Connection that supports 'with'. It can also return a new + Connection, or one from a pool. The function will also catch + when an instance of this class is to be deleted. With that + we can return Connections to the pool on exceptions and so + forth without making the caller be responsible for catching + them. If possible the function makes sure to return a + connection to the pool. 
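+
+    A minimal usage sketch (illustrative)::
+
+        with ConnectionContext(conf, connection_pool) as conn:
+            conn.direct_send(msg_id, msg)
+
+    direct_send here is proxied to the wrapped Connection through
+    __getattr__; msg_reply() below relies on exactly this pattern.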
+ """ + + def __init__(self, conf, connection_pool, pooled=True, server_params=None): + """Create a new connection, or get one from the pool""" + self.connection = None + self.conf = conf + self.connection_pool = connection_pool + if pooled: + self.connection = connection_pool.get() + else: + self.connection = connection_pool.connection_cls( + conf, + server_params=server_params) + self.pooled = pooled + + def __enter__(self): + """When with ConnectionContext() is used, return self""" + return self + + def _done(self): + """If the connection came from a pool, clean it up and put it back. + If it did not come from a pool, close it. + """ + if self.connection: + if self.pooled: + # Reset the connection so it's ready for the next caller + # to grab from the pool + self.connection.reset() + self.connection_pool.put(self.connection) + else: + try: + self.connection.close() + except Exception: + pass + self.connection = None + + def __exit__(self, exc_type, exc_value, tb): + """End of 'with' statement. We're done here.""" + self._done() + + def __del__(self): + """Caller is done with this connection. Make sure we cleaned up.""" + self._done() + + def close(self): + """Caller is done with this connection.""" + self._done() + + def create_consumer(self, topic, proxy, fanout=False): + self.connection.create_consumer(topic, proxy, fanout) + + def create_worker(self, topic, proxy, pool_name): + self.connection.create_worker(topic, proxy, pool_name) + + def consume_in_thread(self): + self.connection.consume_in_thread() + + def __getattr__(self, key): + """Proxy all other calls to the Connection instance""" + if self.connection: + return getattr(self.connection, key) + else: + raise rpc_common.InvalidRPCConnectionReuse() + + +def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None, + ending=False): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. + + """ + with ConnectionContext(conf, connection_pool) as conn: + if failure: + failure = rpc_common.serialize_remote_exception(failure) + + try: + msg = {'result': reply, 'failure': failure} + except TypeError: + msg = {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure} + if ending: + msg['ending'] = True + conn.direct_send(msg_id, msg) + + +class RpcContext(rpc_common.CommonRpcContext): + """Context that supports replying to a rpc.call""" + def __init__(self, **kwargs): + self.msg_id = kwargs.pop('msg_id', None) + self.conf = kwargs.pop('conf') + super(RpcContext, self).__init__(**kwargs) + + def deepcopy(self): + values = self.to_dict() + values['conf'] = self.conf + values['msg_id'] = self.msg_id + return self.__class__(**values) + + def reply(self, reply=None, failure=None, ending=False, + connection_pool=None): + if self.msg_id: + msg_reply(self.conf, self.msg_id, connection_pool, reply, failure, + ending) + if ending: + self.msg_id = None + + +def unpack_context(conf, msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs. + key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + context_dict['conf'] = conf + ctx = RpcContext.from_dict(context_dict) + rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) + return ctx + + +def pack_context(msg, context): + """Pack context into msg. 
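+
+    For example, a context value {'user': 'admin'} (value illustrative)
+    becomes the message key '_context_user'; unpack_context() applies
+    the reverse mapping.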
+ + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. + + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class ProxyCallback(object): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, conf, proxy, connection_pool): + self.proxy = proxy + self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size) + self.connection_pool = connection_pool + self.conf = conf + + def __call__(self, message_data): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + # It is important to clear the context here, because at this point + # the previous context is stored in local.store.context + if hasattr(local.store, 'context'): + del local.store.context + rpc_common._safe_log(LOG.debug, _('received %s'), message_data) + ctxt = unpack_context(self.conf, message_data) + method = message_data.get('method') + args = message_data.get('args', {}) + version = message_data.get('version', None) + if not method: + LOG.warn(_('no method for message: %s') % message_data) + ctxt.reply(_('No method for message: %s') % message_data, + connection_pool=self.connection_pool) + return + self.pool.spawn_n(self._process_data, ctxt, version, method, args) + + def _process_data(self, ctxt, version, method, args): + """Process a message in a new thread. + + If the proxy object we have has a dispatch method + (see rpc.dispatcher.RpcDispatcher), pass it the version, + method, and args and let it dispatch as appropriate. If not, use + the old behavior of magically calling the specified method on the + proxy we have here. + """ + ctxt.update_store() + try: + rval = self.proxy.dispatch(ctxt, version, method, **args) + # Check if the result was a generator + if inspect.isgenerator(rval): + for x in rval: + ctxt.reply(x, None, connection_pool=self.connection_pool) + else: + ctxt.reply(rval, None, connection_pool=self.connection_pool) + # This final None tells multicall that it is done. + ctxt.reply(ending=True, connection_pool=self.connection_pool) + except Exception: + LOG.exception(_('Exception during message handling')) + ctxt.reply(None, sys.exc_info(), + connection_pool=self.connection_pool) + + +class MulticallWaiter(object): + def __init__(self, conf, connection, timeout): + self._connection = connection + self._iterator = connection.iterconsume(timeout=timeout or + conf.rpc_response_timeout) + self._result = None + self._done = False + self._got_ending = False + self._conf = conf + + def done(self): + if self._done: + return + self._done = True + self._iterator.close() + self._iterator = None + self._connection.close() + + def __call__(self, data): + """The consume() callback will call this. 
Store the result.""" + if data['failure']: + failure = data['failure'] + self._result = rpc_common.deserialize_remote_exception(self._conf, + failure) + + elif data.get('ending', False): + self._got_ending = True + else: + self._result = data['result'] + + def __iter__(self): + """Return a result until we get a 'None' response from consumer""" + if self._done: + raise StopIteration + while True: + try: + self._iterator.next() + except Exception: + with excutils.save_and_reraise_exception(): + self.done() + if self._got_ending: + self.done() + raise StopIteration + result = self._result + if isinstance(result, Exception): + self.done() + raise result + yield result + + +def create_connection(conf, new, connection_pool): + """Create a connection""" + return ConnectionContext(conf, connection_pool, pooled=not new) + + +def multicall(conf, context, topic, msg, timeout, connection_pool): + """Make a call that returns multiple times.""" + # Can't use 'with' for multicall, as it returns an iterator + # that will continue to use the connection. When it's done, + # connection.close() will get called which will put it back into + # the pool + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + pack_context(msg, context) + + conn = ConnectionContext(conf, connection_pool) + wait_msg = MulticallWaiter(conf, conn, timeout) + conn.declare_direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, msg) + return wait_msg + + +def call(conf, context, topic, msg, timeout, connection_pool): + """Sends a message on a topic and wait for a response.""" + rv = multicall(conf, context, topic, msg, timeout, connection_pool) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(conf, context, topic, msg, connection_pool): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.topic_send(topic, msg) + + +def fanout_cast(conf, context, topic, msg, connection_pool): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.fanout_send(topic, msg) + + +def cast_to_server(conf, context, server_params, topic, msg, connection_pool): + """Sends a message on a topic to a specific server.""" + pack_context(msg, context) + with ConnectionContext(conf, connection_pool, pooled=False, + server_params=server_params) as conn: + conn.topic_send(topic, msg) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg, + connection_pool): + """Sends a message on a fanout exchange to a specific server.""" + pack_context(msg, context) + with ConnectionContext(conf, connection_pool, pooled=False, + server_params=server_params) as conn: + conn.fanout_send(topic, msg) + + +def notify(conf, context, topic, msg, connection_pool): + """Sends a notification event on a topic.""" + LOG.debug(_('Sending %(event_type)s on %(topic)s'), + dict(event_type=msg.get('event_type'), + topic=topic)) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.notify_send(topic, msg) + + +def cleanup(connection_pool): + if connection_pool: + connection_pool.empty() + + +def get_control_exchange(conf): + try: + 
return conf.control_exchange + except cfg.NoSuchOptError: + return 'openstack' diff --git a/cloudbaseinit/openstack/common/rpc/common.py b/cloudbaseinit/openstack/common/rpc/common.py new file mode 100644 index 00000000..ff61f2eb --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/common.py @@ -0,0 +1,311 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import traceback + +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import importutils +from cloudbaseinit.openstack.common import jsonutils +from cloudbaseinit.openstack.common import local +from cloudbaseinit.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class RPCException(Exception): + message = _("An unknown RPC related exception occurred.") + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if not message: + try: + message = self.message % kwargs + + except Exception: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + # at least get the core message out if something happened + message = self.message + + super(RPCException, self).__init__(message) + + +class RemoteError(RPCException): + """Signifies that a remote class has raised an exception. + + Contains a string representation of the type of the original exception, + the value of the original exception, and the traceback. These are + sent to the parent as a joined string so printing the exception + contains all of the relevant info. + + """ + message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.") + + def __init__(self, exc_type=None, value=None, traceback=None): + self.exc_type = exc_type + self.value = value + self.traceback = traceback + super(RemoteError, self).__init__(exc_type=exc_type, + value=value, + traceback=traceback) + + +class Timeout(RPCException): + """Signifies that a timeout has occurred. + + This exception is raised if the rpc_response_timeout is reached while + waiting for a response from the remote side. + """ + message = _("Timeout while waiting on RPC response.") + + +class InvalidRPCConnectionReuse(RPCException): + message = _("Invalid reuse of an RPC connection.") + + +class UnsupportedRpcVersion(RPCException): + message = _("Specified RPC version, %(version)s, not supported by " + "this endpoint.") + + +class Connection(object): + """A connection, returned by rpc.create_connection(). + + This class represents a connection to the message bus used for rpc. + An instance of this class should never be created by users of the rpc API. + Use rpc.create_connection() instead. + """ + def close(self): + """Close the connection. 
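+
+        Backend-specific classes provide the real implementation (e.g.
+        amqp.ConnectionContext.close()); here it simply raises
+        NotImplementedError.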
+ + This method must be called when the connection will no longer be used. + It will ensure that any resources associated with the connection, such + as a network connection, and cleaned up. + """ + raise NotImplementedError() + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer on this connection. + + A consumer is associated with a message queue on the backend message + bus. The consumer will read messages from the queue, unpack them, and + dispatch them to the proxy object. The contents of the message pulled + off of the queue will determine which method gets called on the proxy + object. + + :param topic: This is a name associated with what to consume from. + Multiple instances of a service may consume from the same + topic. For example, all instances of nova-compute consume + from a queue called "compute". In that case, the + messages will get distributed amongst the consumers in a + round-robin fashion if fanout=False. If fanout=True, + every consumer associated with this topic will get a + copy of every message. + :param proxy: The object that will handle all incoming messages. + :param fanout: Whether or not this is a fanout topic. See the + documentation for the topic parameter for some + additional comments on this. + """ + raise NotImplementedError() + + def create_worker(self, topic, proxy, pool_name): + """Create a worker on this connection. + + A worker is like a regular consumer of messages directed to a + topic, except that it is part of a set of such consumers (the + "pool") which may run in parallel. Every pool of workers will + receive a given message, but only one worker in the pool will + be asked to process it. Load is distributed across the members + of the pool in round-robin fashion. + + :param topic: This is a name associated with what to consume from. + Multiple instances of a service may consume from the same + topic. + :param proxy: The object that will handle all incoming messages. + :param pool_name: String containing the name of the pool of workers + """ + raise NotImplementedError() + + def consume_in_thread(self): + """Spawn a thread to handle incoming messages. + + Spawn a thread that will be responsible for handling all incoming + messages for consumers that were set up on this connection. + + Message dispatching inside of this is expected to be implemented in a + non-blocking manner. An example implementation would be having this + thread pull messages in for all of the consumers, but utilize a thread + pool for dispatching the messages to the proxy objects. + """ + raise NotImplementedError() + + +def _safe_log(log_func, msg, msg_data): + """Sanitizes the msg_data field before logging.""" + SANITIZE = {'set_admin_password': ('new_pass',), + 'run_instance': ('admin_password',), } + + has_method = 'method' in msg_data and msg_data['method'] in SANITIZE + has_context_token = '_context_auth_token' in msg_data + has_token = 'auth_token' in msg_data + + if not any([has_method, has_context_token, has_token]): + return log_func(msg, msg_data) + + msg_data = copy.deepcopy(msg_data) + + if has_method: + method = msg_data['method'] + if method in SANITIZE: + args_to_sanitize = SANITIZE[method] + for arg in args_to_sanitize: + try: + msg_data['args'][arg] = "" + except KeyError: + pass + + if has_context_token: + msg_data['_context_auth_token'] = '' + + if has_token: + msg_data['auth_token'] = '' + + return log_func(msg, msg_data) + + +def serialize_remote_exception(failure_info): + """Prepares exception data to be sent over rpc. 
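+
+    The resulting payload is a JSON object of the form (values
+    illustrative)::
+
+        {"class": "Timeout",
+         "module": "cloudbaseinit.openstack.common.rpc.common",
+         "message": "...", "tb": [...], "args": [...], "kwargs": {...}}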
+ + Failure_info should be a sys.exc_info() tuple. + + """ + tb = traceback.format_exception(*failure_info) + failure = failure_info[1] + LOG.error(_("Returning exception %s to caller"), unicode(failure)) + LOG.error(tb) + + kwargs = {} + if hasattr(failure, 'kwargs'): + kwargs = failure.kwargs + + data = { + 'class': str(failure.__class__.__name__), + 'module': str(failure.__class__.__module__), + 'message': unicode(failure), + 'tb': tb, + 'args': failure.args, + 'kwargs': kwargs + } + + json_data = jsonutils.dumps(data) + + return json_data + + +def deserialize_remote_exception(conf, data): + failure = jsonutils.loads(str(data)) + + trace = failure.get('tb', []) + message = failure.get('message', "") + "\n" + "\n".join(trace) + name = failure.get('class') + module = failure.get('module') + + # NOTE(ameade): We DO NOT want to allow just any module to be imported, in + # order to prevent arbitrary code execution. + if not module in conf.allowed_rpc_exception_modules: + return RemoteError(name, failure.get('message'), trace) + + try: + mod = importutils.import_module(module) + klass = getattr(mod, name) + if not issubclass(klass, Exception): + raise TypeError("Can only deserialize Exceptions") + + failure = klass(**failure.get('kwargs', {})) + except (AttributeError, TypeError, ImportError): + return RemoteError(name, failure.get('message'), trace) + + ex_type = type(failure) + str_override = lambda self: message + new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), + {'__str__': str_override, '__unicode__': str_override}) + try: + # NOTE(ameade): Dynamically create a new exception type and swap it in + # as the new type for the exception. This only works on user defined + # Exceptions and not core python exceptions. This is important because + # we cannot necessarily change an exception message so we must override + # the __str__ method. + failure.__class__ = new_ex_type + except TypeError: + # NOTE(ameade): If a core exception then just add the traceback to the + # first exception argument. + failure.args = (message,) + failure.args[1:] + return failure + + +class CommonRpcContext(object): + def __init__(self, **kwargs): + self.values = kwargs + + def __getattr__(self, key): + try: + return self.values[key] + except KeyError: + raise AttributeError(key) + + def to_dict(self): + return copy.deepcopy(self.values) + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def deepcopy(self): + return self.from_dict(self.to_dict()) + + def update_store(self): + local.store.context = self + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag set.""" + # TODO(russellb) This method is a bit of a nova-ism. It makes + # some assumptions about the data in the request context sent + # across rpc, while the rest of this class does not. 
We could get + # rid of this if we changed the nova code that uses this to + # convert the RpcContext back to its native RequestContext doing + # something like nova.context.RequestContext.from_dict(ctxt.to_dict()) + + context = self.deepcopy() + context.values['is_admin'] = True + + context.values.setdefault('roles', []) + + if 'admin' not in context.values['roles']: + context.values['roles'].append('admin') + + if read_deleted is not None: + context.values['read_deleted'] = read_deleted + + return context diff --git a/cloudbaseinit/openstack/common/rpc/dispatcher.py b/cloudbaseinit/openstack/common/rpc/dispatcher.py new file mode 100644 index 00000000..259e1f44 --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/dispatcher.py @@ -0,0 +1,152 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Code for rpc message dispatching. + +Messages that come in have a version number associated with them. RPC API +version numbers are in the form: + + Major.Minor + +For a given message with version X.Y, the receiver must be marked as able to +handle messages of version A.B, where: + + A = X + + B >= Y + +The Major version number would be incremented for an almost completely new API. +The Minor version number would be incremented for backwards compatible changes +to an existing API. A backwards compatible change could be something like +adding a new method, adding an argument to an existing method (but not +requiring it), or changing the type for an existing argument (but still +handling the old type as well). + +The conversion over to a versioned API must be done on both the client side and +server side of the API at the same time. However, as the code stands today, +there can be both versioned and unversioned APIs implemented in the same code +base. + +EXAMPLES +======== + +Nova was the first project to use versioned rpc APIs. Consider the compute rpc +API as an example. The client side is in nova/compute/rpcapi.py and the server +side is in nova/compute/manager.py. + + +Example 1) Adding a new method. +------------------------------- + +Adding a new method is a backwards compatible change. It should be added to +nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to +X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should +have a specific version specified to indicate the minimum API version that must +be implemented for the method to be supported. For example:: + + def get_host_uptime(self, ctxt, host): + topic = _compute_topic(self.topic, ctxt, host, None) + return self.call(ctxt, self.make_msg('get_host_uptime'), topic, + version='1.1') + +In this case, version '1.1' is the first version that supported the +get_host_uptime() method. + + +Example 2) Adding a new parameter. +---------------------------------- + +Adding a new parameter to an rpc method can be made backwards compatible. The +RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped. 
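+For example, an RPC_API_VERSION of 1.4 would become 1.5; adding an
+optional parameter is a backwards compatible (minor) change.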
+The implementation of the method must not expect the parameter to be present.:: + + def some_remote_method(self, arg1, arg2, newarg=None): + # The code needs to deal with newarg=None for cases + # where an older client sends a message without it. + pass + +On the client side, the same changes should be made as in example 1. The +minimum version that supports the new parameter should be specified. +""" + +from cloudbaseinit.openstack.common.rpc import common as rpc_common + + +class RpcDispatcher(object): + """Dispatch rpc messages according to the requested API version. + + This class can be used as the top level 'manager' for a service. It + contains a list of underlying managers that have an API_VERSION attribute. + """ + + def __init__(self, callbacks): + """Initialize the rpc dispatcher. + + :param callbacks: List of proxy objects that are an instance + of a class with rpc methods exposed. Each proxy + object should have an RPC_API_VERSION attribute. + """ + self.callbacks = callbacks + super(RpcDispatcher, self).__init__() + + @staticmethod + def _is_compatible(mversion, version): + """Determine whether versions are compatible. + + :param mversion: The API version implemented by a callback. + :param version: The API version requested by an incoming message. + """ + version_parts = version.split('.') + mversion_parts = mversion.split('.') + if int(version_parts[0]) != int(mversion_parts[0]): # Major + return False + if int(version_parts[1]) > int(mversion_parts[1]): # Minor + return False + return True + + def dispatch(self, ctxt, version, method, **kwargs): + """Dispatch a message based on a requested version. + + :param ctxt: The request context + :param version: The requested API version from the incoming message + :param method: The method requested to be called by the incoming + message. + :param kwargs: A dict of keyword arguments to be passed to the method. + + :returns: Whatever is returned by the underlying method that gets + called. + """ + if not version: + version = '1.0' + + had_compatible = False + for proxyobj in self.callbacks: + if hasattr(proxyobj, 'RPC_API_VERSION'): + rpc_api_version = proxyobj.RPC_API_VERSION + else: + rpc_api_version = '1.0' + is_compatible = self._is_compatible(rpc_api_version, version) + had_compatible = had_compatible or is_compatible + if not hasattr(proxyobj, method): + continue + if is_compatible: + return getattr(proxyobj, method)(ctxt, **kwargs) + + if had_compatible: + raise AttributeError("No such RPC function '%s'" % method) + else: + raise rpc_common.UnsupportedRpcVersion(version=version) diff --git a/cloudbaseinit/openstack/common/rpc/impl_fake.py b/cloudbaseinit/openstack/common/rpc/impl_fake.py new file mode 100644 index 00000000..8edda9f3 --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/impl_fake.py @@ -0,0 +1,184 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Fake RPC implementation which calls proxy methods directly with no +queues. 
Casts will block, but this is very useful for tests. +""" + +import inspect +import time + +import eventlet + +from cloudbaseinit.openstack.common import jsonutils +from cloudbaseinit.openstack.common.rpc import common as rpc_common + +CONSUMERS = {} + + +class RpcContext(rpc_common.CommonRpcContext): + def __init__(self, **kwargs): + super(RpcContext, self).__init__(**kwargs) + self._response = [] + self._done = False + + def deepcopy(self): + values = self.to_dict() + new_inst = self.__class__(**values) + new_inst._response = self._response + new_inst._done = self._done + return new_inst + + def reply(self, reply=None, failure=None, ending=False): + if ending: + self._done = True + if not self._done: + self._response.append((reply, failure)) + + +class Consumer(object): + def __init__(self, topic, proxy): + self.topic = topic + self.proxy = proxy + + def call(self, context, version, method, args, timeout): + done = eventlet.event.Event() + + def _inner(): + ctxt = RpcContext.from_dict(context.to_dict()) + try: + rval = self.proxy.dispatch(context, version, method, **args) + res = [] + # Caller might have called ctxt.reply() manually + for (reply, failure) in ctxt._response: + if failure: + raise failure[0], failure[1], failure[2] + res.append(reply) + # if ending not 'sent'...we might have more data to + # return from the function itself + if not ctxt._done: + if inspect.isgenerator(rval): + for val in rval: + res.append(val) + else: + res.append(rval) + done.send(res) + except Exception as e: + done.send_exception(e) + + thread = eventlet.greenthread.spawn(_inner) + + if timeout: + start_time = time.time() + while not done.ready(): + eventlet.greenthread.sleep(1) + cur_time = time.time() + if (cur_time - start_time) > timeout: + thread.kill() + raise rpc_common.Timeout() + + return done.wait() + + +class Connection(object): + """Connection object.""" + + def __init__(self): + self.consumers = [] + + def create_consumer(self, topic, proxy, fanout=False): + consumer = Consumer(topic, proxy) + self.consumers.append(consumer) + if topic not in CONSUMERS: + CONSUMERS[topic] = [] + CONSUMERS[topic].append(consumer) + + def close(self): + for consumer in self.consumers: + CONSUMERS[consumer.topic].remove(consumer) + self.consumers = [] + + def consume_in_thread(self): + pass + + +def create_connection(conf, new=True): + """Create a connection""" + return Connection() + + +def check_serialize(msg): + """Make sure a message intended for rpc can be serialized.""" + jsonutils.dumps(msg) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + + check_serialize(msg) + + method = msg.get('method') + if not method: + return + args = msg.get('args', {}) + version = msg.get('version', None) + + try: + consumer = CONSUMERS[topic][0] + except (KeyError, IndexError): + return iter([None]) + else: + return consumer.call(context, version, method, args, timeout) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + rv = multicall(conf, context, topic, msg, timeout) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(conf, context, topic, msg): + try: + call(conf, context, topic, msg) + except Exception: + pass + + +def notify(conf, context, topic, msg): + check_serialize(msg) + + +def cleanup(): + pass + + +def fanout_cast(conf, context, topic, msg): + """Cast to all consumers of a topic""" + check_serialize(msg) + 
method = msg.get('method') + if not method: + return + args = msg.get('args', {}) + version = msg.get('version', None) + + for consumer in CONSUMERS.get(topic, []): + try: + consumer.call(context, version, method, args, None) + except Exception: + pass diff --git a/cloudbaseinit/openstack/common/rpc/impl_kombu.py b/cloudbaseinit/openstack/common/rpc/impl_kombu.py new file mode 100644 index 00000000..c573fab5 --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/impl_kombu.py @@ -0,0 +1,793 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools +import itertools +import socket +import ssl +import sys +import time +import uuid + +import eventlet +import greenlet +import kombu +import kombu.connection +import kombu.entity +import kombu.messaging + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import network_utils +from cloudbaseinit.openstack.common.rpc import amqp as rpc_amqp +from cloudbaseinit.openstack.common.rpc import common as rpc_common + +kombu_opts = [ + cfg.StrOpt('kombu_ssl_version', + default='', + help='SSL version to use (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_keyfile', + default='', + help='SSL key file (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_certfile', + default='', + help='SSL cert file (valid only if SSL enabled)'), + cfg.StrOpt('kombu_ssl_ca_certs', + default='', + help=('SSL certification authority file ' + '(valid only if SSL enabled)')), + cfg.StrOpt('rabbit_host', + default='localhost', + help='The RabbitMQ broker address where a single node is used'), + cfg.IntOpt('rabbit_port', + default=5672, + help='The RabbitMQ broker port where a single node is used'), + cfg.ListOpt('rabbit_hosts', + default=['$rabbit_host:$rabbit_port'], + help='RabbitMQ HA cluster host:port pairs'), + cfg.BoolOpt('rabbit_use_ssl', + default=False, + help='connect over SSL for RabbitMQ'), + cfg.StrOpt('rabbit_userid', + default='guest', + help='the RabbitMQ userid'), + cfg.StrOpt('rabbit_password', + default='guest', + help='the RabbitMQ password'), + cfg.StrOpt('rabbit_virtual_host', + default='/', + help='the RabbitMQ virtual host'), + cfg.IntOpt('rabbit_retry_interval', + default=1, + help='how frequently to retry connecting with RabbitMQ'), + cfg.IntOpt('rabbit_retry_backoff', + default=2, + help='how long to backoff for between retries when connecting ' + 'to RabbitMQ'), + cfg.IntOpt('rabbit_max_retries', + default=0, + help='maximum retries with trying to connect to RabbitMQ ' + '(the default of 0 implies an infinite retry count)'), + cfg.BoolOpt('rabbit_durable_queues', + default=False, + help='use durable queues in RabbitMQ'), + cfg.BoolOpt('rabbit_ha_queues', + default=False, + help='use H/A queues in RabbitMQ (x-ha-policy: all).' 
+ 'You need to wipe RabbitMQ database when ' + 'changing this option.'), + +] + +cfg.CONF.register_opts(kombu_opts) + +LOG = rpc_common.LOG + + +def _get_queue_arguments(conf): + """Construct the arguments for declaring a queue. + + If the rabbit_ha_queues option is set, we declare a mirrored queue + as described here: + + http://www.rabbitmq.com/ha.html + + Setting x-ha-policy to all means that the queue will be mirrored + to all nodes in the cluster. + """ + return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {} + + +class ConsumerBase(object): + """Consumer base class.""" + + def __init__(self, channel, callback, tag, **kwargs): + """Declare a queue on an amqp channel. + + 'channel' is the amqp channel to use + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + queue name, exchange name, and other kombu options are + passed in here as a dictionary. + """ + self.callback = callback + self.tag = str(tag) + self.kwargs = kwargs + self.queue = None + self.reconnect(channel) + + def reconnect(self, channel): + """Re-declare the queue after a rabbit reconnect""" + self.channel = channel + self.kwargs['channel'] = channel + self.queue = kombu.entity.Queue(**self.kwargs) + self.queue.declare() + + def consume(self, *args, **kwargs): + """Actually declare the consumer on the amqp channel. This will + start the flow of messages from the queue. Using the + Connection.iterconsume() iterator will process the messages, + calling the appropriate callback. + + If a callback is specified in kwargs, use that. Otherwise, + use the callback passed during __init__() + + If kwargs['nowait'] is True, then this call will block until + a message is read. + + Messages will automatically be acked if the callback doesn't + raise an exception + """ + + options = {'consumer_tag': self.tag} + options['nowait'] = kwargs.get('nowait', False) + callback = kwargs.get('callback', self.callback) + if not callback: + raise ValueError("No callback defined") + + def _callback(raw_message): + message = self.channel.message_to_python(raw_message) + try: + callback(message.payload) + message.ack() + except Exception: + LOG.exception(_("Failed to process message... skipping it.")) + + self.queue.consume(*args, callback=_callback, **options) + + def cancel(self): + """Cancel the consuming from the queue, if it has started""" + try: + self.queue.cancel(self.tag) + except KeyError, e: + # NOTE(comstud): Kludge to get around a amqplib bug + if str(e) != "u'%s'" % self.tag: + raise + self.queue = None + + +class DirectConsumer(ConsumerBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): + """Init a 'direct' queue. 
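+
+        In this codebase a 'direct' queue carries the call()/multicall()
+        replies addressed to a particular msg_id.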
+ + 'channel' is the amqp channel to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange(name=msg_id, + type='direct', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(DirectConsumer, self).__init__(channel, + callback, + tag, + name=msg_id, + exchange=exchange, + routing_key=msg_id, + **options) + + +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" + + def __init__(self, conf, channel, topic, callback, tag, name=None, + exchange_name=None, **kwargs): + """Init a 'topic' queue. + + :param channel: the amqp channel to use + :param topic: the topic to listen on + :paramtype topic: str + :param callback: the callback to call when messages are received + :param tag: a unique ID for the consumer on the channel + :param name: optional queue name, defaults to topic + :paramtype name: str + + Other kombu options may be passed as keyword arguments + """ + # Default options + options = {'durable': conf.rabbit_durable_queues, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) + exchange = kombu.entity.Exchange(name=exchange_name, + type='topic', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(TopicConsumer, self).__init__(channel, + callback, + tag, + name=name or topic, + exchange=exchange, + routing_key=topic, + **options) + + +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" + + def __init__(self, conf, channel, topic, callback, tag, **kwargs): + """Init a 'fanout' queue. 
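+
+        Each consumer binds its own uniquely named queue
+        ('<topic>_fanout_<uuid>') to the '<topic>_fanout' exchange, so
+        every consumer receives a copy of every message.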
+ + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange(name=exchange_name, type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutConsumer, self).__init__(channel, callback, tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, + routing_key=self.routing_key) + + def send(self, msg): + """Send a message""" + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, conf, channel, msg_id, **kwargs): + """init a 'direct' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(DirectPublisher, self).__init__(channel, msg_id, msg_id, + type='direct', **options) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'topic' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': conf.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange_name = rpc_amqp.get_control_exchange(conf) + super(TopicPublisher, self).__init__(channel, + exchange_name, + topic, + type='topic', + **options) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'fanout' publisher. 
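+
+        Messages are published to the '<topic>_fanout' exchange with no
+        routing key.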
+ + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic, + None, type='fanout', **options) + + +class NotifyPublisher(TopicPublisher): + """Publisher class for 'notify'""" + + def __init__(self, conf, channel, topic, **kwargs): + self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + self.queue_arguments = _get_queue_arguments(conf) + super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) + + def reconnect(self, channel): + super(NotifyPublisher, self).reconnect(channel) + + # NOTE(jerdfelt): Normally the consumer would create the queue, but + # we do this to ensure that messages don't get dropped if the + # consumer is started after we do + queue = kombu.entity.Queue(channel=channel, + exchange=self.exchange, + durable=self.durable, + name=self.routing_key, + routing_key=self.routing_key, + queue_arguments=self.queue_arguments) + queue.declare() + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + self.consumers = [] + self.consumer_thread = None + self.conf = conf + self.max_retries = self.conf.rabbit_max_retries + # Try forever? + if self.max_retries <= 0: + self.max_retries = None + self.interval_start = self.conf.rabbit_retry_interval + self.interval_stepping = self.conf.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 + self.memory_transport = False + + if server_params is None: + server_params = {} + # Keys to translate from server_params to kombu params + server_params_to_kombu_params = {'username': 'userid'} + + ssl_params = self._fetch_ssl_params() + params_list = [] + for adr in self.conf.rabbit_hosts: + hostname, port = network_utils.parse_host_port( + adr, default_port=self.conf.rabbit_port) + + params = { + 'hostname': hostname, + 'port': port, + 'userid': self.conf.rabbit_userid, + 'password': self.conf.rabbit_password, + 'virtual_host': self.conf.rabbit_virtual_host, + } + + for sp_key, value in server_params.iteritems(): + p_key = server_params_to_kombu_params.get(sp_key, sp_key) + params[p_key] = value + + if self.conf.fake_rabbit: + params['transport'] = 'memory' + if self.conf.rabbit_use_ssl: + params['ssl'] = ssl_params + + params_list.append(params) + + self.params_list = params_list + + self.memory_transport = self.conf.fake_rabbit + + self.connection = None + self.reconnect() + + def _fetch_ssl_params(self): + """Handles fetching what ssl params + should be used for the connection (if any)""" + ssl_params = dict() + + # http://docs.python.org/library/ssl.html - ssl.wrap_socket + if self.conf.kombu_ssl_version: + ssl_params['ssl_version'] = self.conf.kombu_ssl_version + if self.conf.kombu_ssl_keyfile: + ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile + if self.conf.kombu_ssl_certfile: + ssl_params['certfile'] = self.conf.kombu_ssl_certfile + if self.conf.kombu_ssl_ca_certs: + ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs + # We might want to allow variations in the + # future with this? + ssl_params['cert_reqs'] = ssl.CERT_REQUIRED + + if not ssl_params: + # Just have the default behavior + return True + else: + # Return the extended behavior + return ssl_params + + def _connect(self, params): + """Connect to rabbit. Re-establish any queues that may have + been declared before if we are reconnecting. 
Exceptions should + be handled by the caller. + """ + if self.connection: + LOG.info(_("Reconnecting to AMQP server on " + "%(hostname)s:%(port)d") % params) + try: + self.connection.close() + except self.connection_errors: + pass + # Setting this in case the next statement fails, though + # it shouldn't be doing any network operations, yet. + self.connection = None + self.connection = kombu.connection.BrokerConnection(**params) + self.connection_errors = self.connection.connection_errors + if self.memory_transport: + # Kludge to speed up tests. + self.connection.transport.polling_interval = 0.0 + self.consumer_num = itertools.count(1) + self.connection.connect() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + for consumer in self.consumers: + consumer.reconnect(self.channel) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % + params) + + def reconnect(self): + """Handles reconnecting and re-establishing queues. + Will retry up to self.max_retries number of times. + self.max_retries = 0 means to retry forever. + Sleep between tries, starting at self.interval_start + seconds, backing off self.interval_stepping number of seconds + each attempt. + """ + + attempt = 0 + while True: + params = self.params_list[attempt % len(self.params_list)] + attempt += 1 + try: + self._connect(params) + return + except (IOError, self.connection_errors) as e: + pass + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. (See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. + if 'timeout' not in str(e): + raise + + log_info = {} + log_info['err_str'] = str(e) + log_info['max_retries'] = self.max_retries + log_info.update(params) + + if self.max_retries and attempt == self.max_retries: + LOG.error(_('Unable to connect to AMQP server on ' + '%(hostname)s:%(port)d after %(max_retries)d ' + 'tries: %(err_str)s') % log_info) + # NOTE(comstud): Copied from original code. There's + # really no better recourse because if this was a queue we + # need to consume on, we have no way to consume anymore. + sys.exit(1) + + if attempt == 1: + sleep_time = self.interval_start or 1 + elif attempt > 1: + sleep_time += self.interval_stepping + if self.interval_max: + sleep_time = min(sleep_time, self.interval_max) + + log_info['sleep_time'] = sleep_time + LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' + 'unreachable: %(err_str)s. Trying again in ' + '%(sleep_time)d seconds.') % log_info) + time.sleep(sleep_time) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (self.connection_errors, socket.timeout, IOError), e: + if error_callback: + error_callback(e) + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. (See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. 
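+                # Anything whose text does not mention 'timeout' is
+                # re-raised; timeout-like errors fall through to
+                # error_callback and reconnect() below.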
+ if 'timeout' not in str(e): + raise + if error_callback: + error_callback(e) + self.reconnect() + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.channel.close() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + self.consumers = [] + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + info = {'do_consume': True} + + def _error_callback(exc): + if isinstance(exc, socket.timeout): + LOG.exception(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to consume message from queue: %s') % + str(exc)) + info['do_consume'] = True + + def _consume(): + if info['do_consume']: + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] + for queue in queues_head: + queue.consume(nowait=True) + queues_tail.consume(nowait=False) + info['do_consume'] = False + return self.connection.drain_events(timeout=timeout) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def publisher_send(self, cls, topic, msg, **kwargs): + """Send to a publisher based on the publisher class""" + + def _error_callback(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publish(): + publisher = cls(self.conf, self.channel, topic, **kwargs) + publisher.send(msg) + + self.ensure(_error_callback, _publish) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+ In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.declare_consumer(DirectConsumer, topic, callback) + + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): + """Create a 'topic' consumer.""" + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + exchange_name=exchange_name, + ), + topic, callback) + + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg): + """Send a 'topic' message""" + self.publisher_send(TopicPublisher, topic, msg) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) + + def notify_send(self, topic, msg, **kwargs): + """Send a notify message on a topic""" + self.publisher_send(NotifyPublisher, topic, msg, **kwargs) + + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + + if fanout: + self.declare_fanout_consumer(topic, proxy_cb) + else: + self.declare_topic_consumer(topic, proxy_cb) + + def create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + self.declare_topic_consumer(topic, proxy_cb, pool_name) + + +def create_connection(conf, new=True): + """Create a connection""" + return rpc_amqp.create_connection( + conf, new, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + return rpc_amqp.multicall( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + return rpc_amqp.call( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast(conf, context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + return rpc_amqp.cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast(conf, context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + return rpc_amqp.fanout_cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a topic to a specific server.""" + return rpc_amqp.cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def 
fanout_cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a fanout exchange to a specific server.""" + return rpc_amqp.fanout_cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def notify(conf, context, topic, msg): + """Sends a notification event on a topic.""" + return rpc_amqp.notify( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) diff --git a/cloudbaseinit/openstack/common/rpc/impl_qpid.py b/cloudbaseinit/openstack/common/rpc/impl_qpid.py new file mode 100644 index 00000000..e8bcc479 --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/impl_qpid.py @@ -0,0 +1,585 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# Copyright 2011 - 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools +import itertools +import time +import uuid + +import eventlet +import greenlet +import qpid.messaging +import qpid.messaging.exceptions + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import jsonutils +from cloudbaseinit.openstack.common import log as logging +from cloudbaseinit.openstack.common.rpc import amqp as rpc_amqp +from cloudbaseinit.openstack.common.rpc import common as rpc_common + +LOG = logging.getLogger(__name__) + +qpid_opts = [ + cfg.StrOpt('qpid_hostname', + default='localhost', + help='Qpid broker hostname'), + cfg.StrOpt('qpid_port', + default='5672', + help='Qpid broker port'), + cfg.ListOpt('qpid_hosts', + default=['$qpid_hostname:$qpid_port'], + help='Qpid HA cluster host:port pairs'), + cfg.StrOpt('qpid_username', + default='', + help='Username for qpid connection'), + cfg.StrOpt('qpid_password', + default='', + help='Password for qpid connection'), + cfg.StrOpt('qpid_sasl_mechanisms', + default='', + help='Space separated list of SASL mechanisms to use for auth'), + cfg.IntOpt('qpid_heartbeat', + default=60, + help='Seconds between connection keepalive heartbeats'), + cfg.StrOpt('qpid_protocol', + default='tcp', + help="Transport to use, either 'tcp' or 'ssl'"), + cfg.BoolOpt('qpid_tcp_nodelay', + default=True, + help='Disable Nagle algorithm'), +] + +cfg.CONF.register_opts(qpid_opts) + + +class ConsumerBase(object): + """Consumer base class.""" + + def __init__(self, session, callback, node_name, node_opts, + link_name, link_opts): + """Declare a queue on an amqp session. + + 'session' is the amqp session to use + 'callback' is the callback to call when messages are received + 'node_name' is the first part of the Qpid address string, before ';' + 'node_opts' will be applied to the "x-declare" section of "node" + in the address string. + 'link_name' goes into the "name" field of the "link" in the address + string + 'link_opts' will be applied to the "x-declare" section of "link" + in the address string. 
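+
+        The receiver address assembled below has the general shape
+        (sketch)::
+
+            node_name ; {"create": "always", "node": {...}, "link": {...}}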
+ """ + self.callback = callback + self.receiver = None + self.session = None + + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True, + }, + }, + "link": { + "name": link_name, + "durable": True, + "x-declare": { + "durable": False, + "auto-delete": True, + "exclusive": False, + }, + }, + } + addr_opts["node"]["x-declare"].update(node_opts) + addr_opts["link"]["x-declare"].update(link_opts) + + self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + + self.reconnect(session) + + def reconnect(self, session): + """Re-declare the receiver after a qpid reconnect""" + self.session = session + self.receiver = session.receiver(self.address) + self.receiver.capacity = 1 + + def consume(self): + """Fetch the message and pass it to the callback object""" + message = self.receiver.fetch() + try: + self.callback(message.content) + except Exception: + LOG.exception(_("Failed to process message... skipping it.")) + finally: + self.session.acknowledge(message) + + def get_receiver(self): + return self.receiver + + +class DirectConsumer(ConsumerBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, conf, session, msg_id, callback): + """Init a 'direct' queue. + + 'session' is the amqp session to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + """ + + super(DirectConsumer, self).__init__(session, callback, + "%s/%s" % (msg_id, msg_id), + {"type": "direct"}, + msg_id, + {"exclusive": True}) + + +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" + + def __init__(self, conf, session, topic, callback, name=None, + exchange_name=None): + """Init a 'topic' queue. + + :param session: the amqp session to use + :param topic: is the topic to listen on + :paramtype topic: str + :param callback: the callback to call when messages are received + :param name: optional queue name, defaults to topic + """ + + exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) + super(TopicConsumer, self).__init__(session, callback, + "%s/%s" % (exchange_name, topic), + {}, name or topic, {}) + + +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" + + def __init__(self, conf, session, topic, callback): + """Init a 'fanout' queue. 
+
+        'session' is the amqp session to use
+        'topic' is the topic to listen on
+        'callback' is the callback to call when messages are received
+        """
+
+        super(FanoutConsumer, self).__init__(
+            session, callback,
+            "%s_fanout" % topic,
+            {"durable": False, "type": "fanout"},
+            "%s_fanout_%s" % (topic, uuid.uuid4().hex),
+            {"exclusive": True})
+
+
+class Publisher(object):
+    """Base Publisher class"""
+
+    def __init__(self, session, node_name, node_opts=None):
+        """Init the Publisher class with the exchange_name, routing_key,
+        and other options
+        """
+        self.sender = None
+        self.session = session
+
+        addr_opts = {
+            "create": "always",
+            "node": {
+                "type": "topic",
+                "x-declare": {
+                    "durable": False,
+                    # auto-delete isn't implemented for exchanges in qpid,
+                    # but put in here anyway
+                    "auto-delete": True,
+                },
+            },
+        }
+        if node_opts:
+            addr_opts["node"]["x-declare"].update(node_opts)
+
+        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+
+        self.reconnect(session)
+
+    def reconnect(self, session):
+        """Re-establish the Sender after a reconnection"""
+        self.sender = session.sender(self.address)
+
+    def send(self, msg):
+        """Send a message"""
+        self.sender.send(msg)
+
+
+class DirectPublisher(Publisher):
+    """Publisher class for 'direct'"""
+    def __init__(self, conf, session, msg_id):
+        """Init a 'direct' publisher."""
+        # Note: the exchange type is the lowercase 'direct' expected by Qpid.
+        super(DirectPublisher, self).__init__(session, msg_id,
+                                              {"type": "direct"})
+
+
+class TopicPublisher(Publisher):
+    """Publisher class for 'topic'"""
+    def __init__(self, conf, session, topic):
+        """Init a 'topic' publisher.
+        """
+        exchange_name = rpc_amqp.get_control_exchange(conf)
+        super(TopicPublisher, self).__init__(session,
+                                             "%s/%s" % (exchange_name, topic))
+
+
+class FanoutPublisher(Publisher):
+    """Publisher class for 'fanout'"""
+    def __init__(self, conf, session, topic):
+        """Init a 'fanout' publisher.
+        """
+        super(FanoutPublisher, self).__init__(
+            session,
+            "%s_fanout" % topic, {"type": "fanout"})
+
+
+class NotifyPublisher(Publisher):
+    """Publisher class for notifications"""
+    def __init__(self, conf, session, topic):
+        """Init a 'topic' publisher.
+ """ + exchange_name = rpc_amqp.get_control_exchange(conf) + super(NotifyPublisher, self).__init__(session, + "%s/%s" % (exchange_name, topic), + {"durable": True}) + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + self.session = None + self.consumers = {} + self.consumer_thread = None + self.conf = conf + + params = { + 'qpid_hosts': self.conf.qpid_hosts, + 'username': self.conf.qpid_username, + 'password': self.conf.qpid_password, + } + params.update(server_params or {}) + + self.brokers = params['qpid_hosts'] + self.username = params['username'] + self.password = params['password'] + self.connection_create(self.brokers[0]) + self.reconnect() + + def connection_create(self, broker): + # Create the connection - this does not open the connection + self.connection = qpid.messaging.Connection(broker) + + # Check if flags are set and if so set them for the connection + # before we call open + self.connection.username = self.username + self.connection.password = self.password + + self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms + # Reconnection is done by self.reconnect() + self.connection.reconnect = False + self.connection.heartbeat = self.conf.qpid_heartbeat + self.connection.protocol = self.conf.qpid_protocol + self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay + + def _register_consumer(self, consumer): + self.consumers[str(consumer.get_receiver())] = consumer + + def _lookup_consumer(self, receiver): + return self.consumers[str(receiver)] + + def reconnect(self): + """Handles reconnecting and re-establishing sessions and queues""" + if self.connection.opened(): + try: + self.connection.close() + except qpid.messaging.exceptions.ConnectionError: + pass + + attempt = 0 + delay = 1 + while True: + broker = self.brokers[attempt % len(self.brokers)] + attempt += 1 + + try: + self.connection_create(broker) + self.connection.open() + except qpid.messaging.exceptions.ConnectionError, e: + msg_dict = dict(e=e, delay=delay) + msg = _("Unable to connect to AMQP server: %(e)s. 
" + "Sleeping %(delay)s seconds") % msg_dict + LOG.error(msg) + time.sleep(delay) + delay = min(2 * delay, 60) + else: + LOG.info(_('Connected to AMQP server on %s'), broker) + break + + self.session = self.connection.session() + + if self.consumers: + consumers = self.consumers + self.consumers = {} + + for consumer in consumers.itervalues(): + consumer.reconnect(self.session) + self._register_consumer(consumer) + + LOG.debug(_("Re-established AMQP queues")) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (qpid.messaging.exceptions.Empty, + qpid.messaging.exceptions.ConnectionError), e: + if error_callback: + error_callback(e) + self.reconnect() + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.connection.close() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.session.close() + self.session = self.connection.session() + self.consumers = {} + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.session, topic, callback) + self._register_consumer(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + def _error_callback(exc): + if isinstance(exc, qpid.messaging.exceptions.Empty): + LOG.exception(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to consume message from queue: %s') % + str(exc)) + + def _consume(): + nxt_receiver = self.session.next_receiver(timeout=timeout) + try: + self._lookup_consumer(nxt_receiver).consume() + except Exception: + LOG.exception(_("Error processing message. Skipping it.")) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publisher_send(): + publisher = cls(self.conf, self.session, topic) + publisher.send(msg) + + return self.ensure(_connect_error, _publisher_send) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+ In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.declare_consumer(DirectConsumer, topic, callback) + + def declare_topic_consumer(self, topic, callback=None, queue_name=None, + exchange_name=None): + """Create a 'topic' consumer.""" + self.declare_consumer(functools.partial(TopicConsumer, + name=queue_name, + exchange_name=exchange_name, + ), + topic, callback) + + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg): + """Send a 'topic' message""" + self.publisher_send(TopicPublisher, topic, msg) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) + + def notify_send(self, topic, msg, **kwargs): + """Send a notify message on a topic""" + self.publisher_send(NotifyPublisher, topic, msg) + + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + + if fanout: + consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) + else: + consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb) + + self._register_consumer(consumer) + + return consumer + + def create_worker(self, topic, proxy, pool_name): + """Create a worker that calls a method in a proxy object""" + proxy_cb = rpc_amqp.ProxyCallback( + self.conf, proxy, + rpc_amqp.get_connection_pool(self.conf, Connection)) + + consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, + name=pool_name) + + self._register_consumer(consumer) + + return consumer + + +def create_connection(conf, new=True): + """Create a connection""" + return rpc_amqp.create_connection( + conf, new, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + return rpc_amqp.multicall( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + return rpc_amqp.call( + conf, context, topic, msg, timeout, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast(conf, context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + return rpc_amqp.cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast(conf, context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + return rpc_amqp.fanout_cast( + conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on 
a topic to a specific server.""" + return rpc_amqp.cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg): + """Sends a message on a fanout exchange to a specific server.""" + return rpc_amqp.fanout_cast_to_server( + conf, context, server_params, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def notify(conf, context, topic, msg): + """Sends a notification event on a topic.""" + return rpc_amqp.notify(conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) diff --git a/cloudbaseinit/openstack/common/rpc/impl_zmq.py b/cloudbaseinit/openstack/common/rpc/impl_zmq.py new file mode 100644 index 00000000..9d5fceae --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/impl_zmq.py @@ -0,0 +1,718 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pprint +import socket +import string +import sys +import types +import uuid + +import eventlet +from eventlet.green import zmq +import greenlet + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import importutils +from cloudbaseinit.openstack.common import jsonutils +from cloudbaseinit.openstack.common.rpc import common as rpc_common + + +# for convenience, are not modified. +pformat = pprint.pformat +Timeout = eventlet.timeout.Timeout +LOG = rpc_common.LOG +RemoteError = rpc_common.RemoteError +RPCException = rpc_common.RPCException + +zmq_opts = [ + cfg.StrOpt('rpc_zmq_bind_address', default='*', + help='ZeroMQ bind address. Should be a wildcard (*), ' + 'an ethernet interface, or IP. ' + 'The "host" option should point or resolve to this ' + 'address.'), + + # The module.Class to use for matchmaking. + cfg.StrOpt( + 'rpc_zmq_matchmaker', + default=('cloudbaseinit.openstack.common.rpc.' + 'matchmaker.MatchMakerLocalhost'), + help='MatchMaker driver', + ), + + # The following port is unassigned by IANA as of 2012-05-21 + cfg.IntOpt('rpc_zmq_port', default=9501, + help='ZeroMQ receiver listening port'), + + cfg.IntOpt('rpc_zmq_contexts', default=1, + help='Number of ZeroMQ contexts, defaults to 1'), + + cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', + help='Directory for holding IPC sockets'), + + cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(), + help='Name of this node. Must be a valid hostname, FQDN, or ' + 'IP address. Must match "host" option, if running Nova.') +] + + +# These globals are defined in register_opts(conf), +# a mandatory initialization call +CONF = None +ZMQ_CTX = None # ZeroMQ Context, must be global. +matchmaker = None # memoized matchmaker object + + +def _serialize(data): + """ + Serialization wrapper + We prefer using JSON, but it cannot encode all types. 
+ Error if a developer passes us bad data. + """ + try: + return str(jsonutils.dumps(data, ensure_ascii=True)) + except TypeError: + LOG.error(_("JSON serialization failed.")) + raise + + +def _deserialize(data): + """ + Deserialization wrapper + """ + LOG.debug(_("Deserializing: %s"), data) + return jsonutils.loads(data) + + +class ZmqSocket(object): + """ + A tiny wrapper around ZeroMQ to simplify the send/recv protocol + and connection management. + + Can be used as a Context (supports the 'with' statement). + """ + + def __init__(self, addr, zmq_type, bind=True, subscribe=None): + self.sock = ZMQ_CTX.socket(zmq_type) + self.addr = addr + self.type = zmq_type + self.subscriptions = [] + + # Support failures on sending/receiving on wrong socket type. + self.can_recv = zmq_type in (zmq.PULL, zmq.SUB) + self.can_send = zmq_type in (zmq.PUSH, zmq.PUB) + self.can_sub = zmq_type in (zmq.SUB, ) + + # Support list, str, & None for subscribe arg (cast to list) + do_sub = { + list: subscribe, + str: [subscribe], + type(None): [] + }[type(subscribe)] + + for f in do_sub: + self.subscribe(f) + + str_data = {'addr': addr, 'type': self.socket_s(), + 'subscribe': subscribe, 'bind': bind} + + LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) + LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) + LOG.debug(_("-> bind: %(bind)s"), str_data) + + try: + if bind: + self.sock.bind(addr) + else: + self.sock.connect(addr) + except Exception: + raise RPCException(_("Could not open socket.")) + + def socket_s(self): + """Get socket type as string.""" + t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER', + 'DEALER') + return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type] + + def subscribe(self, msg_filter): + """Subscribe.""" + if not self.can_sub: + raise RPCException("Cannot subscribe on this socket.") + LOG.debug(_("Subscribing to %s"), msg_filter) + + try: + self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) + except Exception: + return + + self.subscriptions.append(msg_filter) + + def unsubscribe(self, msg_filter): + """Unsubscribe.""" + if msg_filter not in self.subscriptions: + return + self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter) + self.subscriptions.remove(msg_filter) + + def close(self): + if self.sock is None or self.sock.closed: + return + + # We must unsubscribe, or we'll leak descriptors. 
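+        # (Best effort only: failures below are swallowed because the
+        #  socket is being torn down regardless.)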
+ if len(self.subscriptions) > 0: + for f in self.subscriptions: + try: + self.sock.setsockopt(zmq.UNSUBSCRIBE, f) + except Exception: + pass + self.subscriptions = [] + + # Linger -1 prevents lost/dropped messages + try: + self.sock.close(linger=-1) + except Exception: + pass + self.sock = None + + def recv(self): + if not self.can_recv: + raise RPCException(_("You cannot recv on this socket.")) + return self.sock.recv_multipart() + + def send(self, data): + if not self.can_send: + raise RPCException(_("You cannot send on this socket.")) + self.sock.send_multipart(data) + + +class ZmqClient(object): + """Client for ZMQ sockets.""" + + def __init__(self, addr, socket_type=zmq.PUSH, bind=False): + self.outq = ZmqSocket(addr, socket_type, bind=bind) + + def cast(self, msg_id, topic, data): + self.outq.send([str(msg_id), str(topic), str('cast'), + _serialize(data)]) + + def close(self): + self.outq.close() + + +class RpcContext(rpc_common.CommonRpcContext): + """Context that supports replying to a rpc.call.""" + def __init__(self, **kwargs): + self.replies = [] + super(RpcContext, self).__init__(**kwargs) + + def deepcopy(self): + values = self.to_dict() + values['replies'] = self.replies + return self.__class__(**values) + + def reply(self, reply=None, failure=None, ending=False): + if ending: + return + self.replies.append(reply) + + @classmethod + def marshal(self, ctx): + ctx_data = ctx.to_dict() + return _serialize(ctx_data) + + @classmethod + def unmarshal(self, data): + return RpcContext.from_dict(_deserialize(data)) + + +class InternalContext(object): + """Used by ConsumerBase as a private context for - methods.""" + + def __init__(self, proxy): + self.proxy = proxy + self.msg_waiter = None + + def _get_response(self, ctx, proxy, topic, data): + """Process a curried message and cast the result to topic.""" + LOG.debug(_("Running func with context: %s"), ctx.to_dict()) + data.setdefault('version', None) + data.setdefault('args', []) + + try: + result = proxy.dispatch( + ctx, data['version'], data['method'], **data['args']) + return ConsumerBase.normalize_reply(result, ctx.replies) + except greenlet.GreenletExit: + # ignore these since they are just from shutdowns + pass + except Exception: + return {'exc': + rpc_common.serialize_remote_exception(sys.exc_info())} + + def reply(self, ctx, proxy, + msg_id=None, context=None, topic=None, msg=None): + """Reply to a casted call.""" + # Our real method is curried into msg['args'] + + child_ctx = RpcContext.unmarshal(msg[0]) + response = ConsumerBase.normalize_reply( + self._get_response(child_ctx, proxy, topic, msg[1]), + ctx.replies) + + LOG.debug(_("Sending reply")) + cast(CONF, ctx, topic, { + 'method': '-process_reply', + 'args': { + 'msg_id': msg_id, + 'response': response + } + }) + + +class ConsumerBase(object): + """Base Consumer.""" + + def __init__(self): + self.private_ctx = InternalContext(None) + + @classmethod + def normalize_reply(self, result, replies): + #TODO(ewindisch): re-evaluate and document this method. + if isinstance(result, types.GeneratorType): + return list(result) + elif replies: + return replies + else: + return [result] + + def process(self, style, target, proxy, ctx, data): + # Method starting with - are + # processed internally. (non-valid method name) + method = data['method'] + + # Internal method + # uses internal context for safety. 
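+        # (e.g. a '-reply' message is handled by InternalContext.reply()
+        #  below and never reaches the user-supplied proxy.)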
+ if data['method'][0] == '-': + # For reply / process_reply + method = method[1:] + if method == 'reply': + self.private_ctx.reply(ctx, proxy, **data['args']) + return + + data.setdefault('version', None) + data.setdefault('args', []) + proxy.dispatch(ctx, data['version'], + data['method'], **data['args']) + + +class ZmqBaseReactor(ConsumerBase): + """ + A consumer class implementing a + centralized casting broker (PULL-PUSH) + for RoundRobin requests. + """ + + def __init__(self, conf): + super(ZmqBaseReactor, self).__init__() + + self.mapping = {} + self.proxies = {} + self.threads = [] + self.sockets = [] + self.subscribe = {} + + self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) + + def register(self, proxy, in_addr, zmq_type_in, out_addr=None, + zmq_type_out=None, in_bind=True, out_bind=True, + subscribe=None): + + LOG.info(_("Registering reactor")) + + if zmq_type_in not in (zmq.PULL, zmq.SUB): + raise RPCException("Bad input socktype") + + # Items push in. + inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind, + subscribe=subscribe) + + self.proxies[inq] = proxy + self.sockets.append(inq) + + LOG.info(_("In reactor registered")) + + if not out_addr: + return + + if zmq_type_out not in (zmq.PUSH, zmq.PUB): + raise RPCException("Bad output socktype") + + # Items push out. + outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) + + self.mapping[inq] = outq + self.mapping[outq] = inq + self.sockets.append(outq) + + LOG.info(_("Out reactor registered")) + + def consume_in_thread(self): + def _consume(sock): + LOG.info(_("Consuming socket")) + while True: + self.consume(sock) + + for k in self.proxies.keys(): + self.threads.append( + self.pool.spawn(_consume, k) + ) + + def wait(self): + for t in self.threads: + t.wait() + + def close(self): + for s in self.sockets: + s.close() + + for t in self.threads: + t.kill() + + +class ZmqProxy(ZmqBaseReactor): + """ + A consumer class implementing a + topic-based proxy, forwarding to + IPC sockets. + """ + + def __init__(self, conf): + super(ZmqProxy, self).__init__(conf) + + self.topic_proxy = {} + ipc_dir = CONF.rpc_zmq_ipc_dir + + self.topic_proxy['zmq_replies'] = \ + ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ), + zmq.PUB, bind=True) + self.sockets.append(self.topic_proxy['zmq_replies']) + + def consume(self, sock): + ipc_dir = CONF.rpc_zmq_ipc_dir + + #TODO(ewindisch): use zero-copy (i.e. references, not copying) + data = sock.recv() + msg_id, topic, style, in_msg = data + topic = topic.split('.', 1)[0] + + LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) + + # Handle zmq_replies magic + if topic.startswith('fanout~'): + sock_type = zmq.PUB + elif topic.startswith('zmq_replies'): + sock_type = zmq.PUB + inside = _deserialize(in_msg) + msg_id = inside[-1]['args']['msg_id'] + response = inside[-1]['args']['response'] + LOG.debug(_("->response->%s"), response) + data = [str(msg_id), _serialize(response)] + else: + sock_type = zmq.PUSH + + if not topic in self.topic_proxy: + outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic), + sock_type, bind=True) + self.topic_proxy[topic] = outq + self.sockets.append(outq) + LOG.info(_("Created topic proxy: %s"), topic) + + # It takes some time for a pub socket to open, + # before we can have any faith in doing a send() to it. 
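+        # (This is the ZeroMQ "slow joiner" problem: a PUB socket silently
+        #  drops messages sent before subscribers have finished connecting;
+        #  the short sleep below gives them time to join.)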
+ if sock_type == zmq.PUB: + eventlet.sleep(.5) + + LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data}) + self.topic_proxy[topic].send(data) + LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data}) + + +class ZmqReactor(ZmqBaseReactor): + """ + A consumer class implementing a + consumer for messages. Can also be + used as a 1:1 proxy + """ + + def __init__(self, conf): + super(ZmqReactor, self).__init__(conf) + + def consume(self, sock): + #TODO(ewindisch): use zero-copy (i.e. references, not copying) + data = sock.recv() + LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) + if sock in self.mapping: + LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { + 'data': data}) + self.mapping[sock].send(data) + return + + msg_id, topic, style, in_msg = data + + ctx, request = _deserialize(in_msg) + ctx = RpcContext.unmarshal(ctx) + + proxy = self.proxies[sock] + + self.pool.spawn_n(self.process, style, topic, + proxy, ctx, request) + + +class Connection(rpc_common.Connection): + """Manages connections and threads.""" + + def __init__(self, conf): + self.reactor = ZmqReactor(conf) + + def create_consumer(self, topic, proxy, fanout=False): + # Only consume on the base topic name. + topic = topic.split('.', 1)[0] + + LOG.info(_("Create Consumer for topic (%(topic)s)") % + {'topic': topic}) + + # Subscription scenarios + if fanout: + subscribe = ('', fanout)[type(fanout) == str] + sock_type = zmq.SUB + topic = 'fanout~' + topic + else: + sock_type = zmq.PULL + subscribe = None + + # Receive messages from (local) proxy + inaddr = "ipc://%s/zmq_topic_%s" % \ + (CONF.rpc_zmq_ipc_dir, topic) + + LOG.debug(_("Consumer is a zmq.%s"), + ['PULL', 'SUB'][sock_type == zmq.SUB]) + + self.reactor.register(proxy, inaddr, sock_type, + subscribe=subscribe, in_bind=False) + + def close(self): + self.reactor.close() + + def wait(self): + self.reactor.wait() + + def consume_in_thread(self): + self.reactor.consume_in_thread() + + +def _cast(addr, context, msg_id, topic, msg, timeout=None): + timeout_cast = timeout or CONF.rpc_cast_timeout + payload = [RpcContext.marshal(context), msg] + + with Timeout(timeout_cast, exception=rpc_common.Timeout): + try: + conn = ZmqClient(addr) + + # assumes cast can't return an exception + conn.cast(msg_id, topic, payload) + except zmq.ZMQError: + raise RPCException("Cast failed. ZMQ Socket Exception") + finally: + if 'conn' in vars(): + conn.close() + + +def _call(addr, context, msg_id, topic, msg, timeout=None): + # timeout_response is how long we wait for a response + timeout = timeout or CONF.rpc_response_timeout + + # The msg_id is used to track replies. + msg_id = uuid.uuid4().hex + + # Replies always come into the reply service. + reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host + + LOG.debug(_("Creating payload")) + # Curry the original request into a reply method. + mcontext = RpcContext.marshal(context) + payload = { + 'method': '-reply', + 'args': { + 'msg_id': msg_id, + 'context': mcontext, + 'topic': reply_topic, + 'msg': [mcontext, msg] + } + } + + LOG.debug(_("Creating queue socket for reply waiter")) + + # Messages arriving async. 
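+    # (The waiter below subscribes to its own msg_id on the shared
+    #  zmq_replies PUB socket, so only matching replies get through.)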
+ # TODO(ewindisch): have reply consumer with dynamic subscription mgmt + with Timeout(timeout, exception=rpc_common.Timeout): + try: + msg_waiter = ZmqSocket( + "ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir, + zmq.SUB, subscribe=msg_id, bind=False + ) + + LOG.debug(_("Sending cast")) + _cast(addr, context, msg_id, topic, payload) + + LOG.debug(_("Cast sent; Waiting reply")) + # Blocks until receives reply + msg = msg_waiter.recv() + LOG.debug(_("Received message: %s"), msg) + LOG.debug(_("Unpacking response")) + responses = _deserialize(msg[-1]) + # ZMQError trumps the Timeout error. + except zmq.ZMQError: + raise RPCException("ZMQ Socket Error") + finally: + if 'msg_waiter' in vars(): + msg_waiter.close() + + # It seems we don't need to do all of the following, + # but perhaps it would be useful for multicall? + # One effect of this is that we're checking all + # responses for Exceptions. + for resp in responses: + if isinstance(resp, types.DictType) and 'exc' in resp: + raise rpc_common.deserialize_remote_exception(CONF, resp['exc']) + + return responses[-1] + + +def _multi_send(method, context, topic, msg, timeout=None): + """ + Wraps the sending of messages, + dispatches to the matchmaker and sends + message to all relevant hosts. + """ + conf = CONF + LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) + + queues = matchmaker.queues(topic) + LOG.debug(_("Sending message(s) to: %s"), queues) + + # Don't stack if we have no matchmaker results + if len(queues) == 0: + LOG.warn(_("No matchmaker results. Not casting.")) + # While not strictly a timeout, callers know how to handle + # this exception and a timeout isn't too big a lie. + raise rpc_common.Timeout, "No match from matchmaker." + + # This supports brokerless fanout (addresses > 1) + for queue in queues: + (_topic, ip_addr) = queue + _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port) + + if method.__name__ == '_cast': + eventlet.spawn_n(method, _addr, context, + _topic, _topic, msg, timeout) + return + return method(_addr, context, _topic, _topic, msg, timeout) + + +def create_connection(conf, new=True): + return Connection(conf) + + +def multicall(conf, *args, **kwargs): + """Multiple calls.""" + return _multi_send(_call, *args, **kwargs) + + +def call(conf, *args, **kwargs): + """Send a message, expect a response.""" + data = _multi_send(_call, *args, **kwargs) + return data[-1] + + +def cast(conf, *args, **kwargs): + """Send a message expecting no reply.""" + _multi_send(_cast, *args, **kwargs) + + +def fanout_cast(conf, context, topic, msg, **kwargs): + """Send a message to all listening and expect no reply.""" + # NOTE(ewindisch): fanout~ is used because it avoid splitting on . + # and acts as a non-subtle hint to the matchmaker and ZmqProxy. + _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) + + +def notify(conf, context, topic, msg, **kwargs): + """ + Send notification event. + Notifications are sent to topic-priority. + This differs from the AMQP drivers which send to topic.priority. + """ + # NOTE(ewindisch): dot-priority in rpc notifier does not + # work with our assumptions. 
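+    # Rewrite the separator so the key is not mistaken for a
+    # host-addressed (direct) topic by the matchmaker.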
+    topic = topic.replace('.', '-')
+    cast(conf, context, topic, msg, **kwargs)
+
+
+def cleanup():
+    """Clean up resources in use by implementation."""
+    global ZMQ_CTX
+    global matchmaker
+    matchmaker = None
+    ZMQ_CTX.term()
+    ZMQ_CTX = None
+
+
+def register_opts(conf):
+    """Registration of options for this driver."""
+    #NOTE(ewindisch): ZMQ_CTX and matchmaker
+    # are initialized here as this is as good
+    # an initialization method as any.
+
+    # We memoize through these globals
+    global ZMQ_CTX
+    global matchmaker
+    global CONF
+
+    if not CONF:
+        conf.register_opts(zmq_opts)
+        CONF = conf
+    # Don't re-set, if this method is called twice.
+    if not ZMQ_CTX:
+        ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
+    if not matchmaker:
+        # rpc_zmq_matchmaker should be set to a 'module.Class'
+        mm_path = conf.rpc_zmq_matchmaker.split('.')
+        mm_module = '.'.join(mm_path[:-1])
+        mm_class = mm_path[-1]
+
+        # Only initialize a class.
+        if mm_path[-1][0] not in string.ascii_uppercase:
+            LOG.error(_("Matchmaker could not be loaded.\n"
+                        "rpc_zmq_matchmaker is not a class."))
+            raise RPCException(_("Error loading Matchmaker."))
+
+        mm_impl = importutils.import_module(mm_module)
+        mm_constructor = getattr(mm_impl, mm_class)
+        matchmaker = mm_constructor()
+
+
+register_opts(cfg.CONF)
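+
+# A minimal usage sketch (illustrative; 'EchoManager' and the 'echo' topic
+# are hypothetical, and a ZmqProxy receiver service is assumed to be
+# running so the per-topic IPC sockets exist):
+#
+#     from cloudbaseinit.openstack.common import cfg
+#     from cloudbaseinit.openstack.common.rpc import dispatcher
+#
+#     class EchoManager(object):
+#         def echo(self, context, value):
+#             return value
+#
+#     conn = create_connection(cfg.CONF)
+#     conn.create_consumer('echo', dispatcher.RpcDispatcher([EchoManager()]))
+#     conn.consume_in_thread()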
diff --git a/cloudbaseinit/openstack/common/rpc/matchmaker.py b/cloudbaseinit/openstack/common/rpc/matchmaker.py
new file mode 100644
index 00000000..ff4d4baf
--- /dev/null
+++ b/cloudbaseinit/openstack/common/rpc/matchmaker.py
@@ -0,0 +1,258 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import contextlib
+import itertools
+import json
+
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.openstack.common.gettextutils import _
+from cloudbaseinit.openstack.common import log as logging
+
+
+matchmaker_opts = [
+    # Matchmaker ring file
+    cfg.StrOpt('matchmaker_ringfile',
+               default='/etc/nova/matchmaker_ring.json',
+               help='Matchmaker ring file (JSON)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts)
+LOG = logging.getLogger(__name__)
+contextmanager = contextlib.contextmanager
+
+
+class MatchMakerException(Exception):
+    """Signifies that a match could not be found."""
+    message = _("Match not found by MatchMaker.")
+
+
+class Exchange(object):
+    """
+    Implements lookups.
+    Subclass this to support hashtables, dns, etc.
+    """
+    def __init__(self):
+        pass
+
+    def run(self, key):
+        raise NotImplementedError()
+
+
+class Binding(object):
+    """
+    A binding on which to perform a lookup.
+    """
+    def __init__(self):
+        pass
+
+    def test(self, key):
+        raise NotImplementedError()
+
+
+class MatchMakerBase(object):
+    """Match Maker Base Class."""
+
+    def __init__(self):
+        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
+        self.bindings = []
+
+    def add_binding(self, binding, rule, last=True):
+        self.bindings.append((binding, rule, False, last))
+
+    #NOTE(ewindisch): kept the following method in case we implement the
+    #                 underlying support.
+    #def add_negate_binding(self, binding, rule, last=True):
+    #    self.bindings.append((binding, rule, True, last))
+
+    def queues(self, key):
+        workers = []
+
+        # bit is for negate bindings - if we choose to implement it.
+        # last stops processing rules if this matches.
+        for (binding, exchange, bit, last) in self.bindings:
+            if binding.test(key):
+                workers.extend(exchange.run(key))
+
+                # Support last.
+                if last:
+                    return workers
+        return workers
+
+
+class DirectBinding(Binding):
+    """
+    Specifies a host in the key via a '.' character.
+    Although dots are used in the key, the behavior here is
+    that it maps directly to a host, thus direct.
+    """
+    def test(self, key):
+        if '.' in key:
+            return True
+        return False
+
+
+class TopicBinding(Binding):
+    """
+    Matches a 'bare' key without dots.
+    AMQP generally considers topic exchanges to be those *with* dots,
+    but we deviate here in terminology as the behavior here matches
+    that of a topic exchange (whereas where there are dots, behavior
+    matches that of a direct exchange).
+    """
+    def test(self, key):
+        if '.' not in key:
+            return True
+        return False
+
+
+class FanoutBinding(Binding):
+    """Match on fanout keys, where key starts with 'fanout~' string."""
+    def test(self, key):
+        if key.startswith('fanout~'):
+            return True
+        return False
+
+
+class StubExchange(Exchange):
+    """Exchange that does nothing."""
+    def run(self, key):
+        return [(key, None)]
+
+
+class RingExchange(Exchange):
+    """
+    Match Maker where hosts are loaded from a static file containing
+    a hashmap (JSON formatted).
+
+    __init__ takes an optional ring dictionary argument, otherwise
+    loads the ringfile from CONF.matchmaker_ringfile.
+    """
+    def __init__(self, ring=None):
+        super(RingExchange, self).__init__()
+
+        if ring:
+            self.ring = ring
+        else:
+            fh = open(CONF.matchmaker_ringfile, 'r')
+            self.ring = json.load(fh)
+            fh.close()
+
+        self.ring0 = {}
+        for k in self.ring.keys():
+            self.ring0[k] = itertools.cycle(self.ring[k])
+
+    def _ring_has(self, key):
+        if key in self.ring0:
+            return True
+        return False
+
+
+class RoundRobinRingExchange(RingExchange):
+    """A Topic Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(RoundRobinRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        if not self._ring_has(key):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (key, )
+            )
+            return []
+        host = next(self.ring0[key])
+        return [(key + '.' + host, host)]
+
+
+class FanoutRingExchange(RingExchange):
+    """Fanout Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(FanoutRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        # Assume starts with "fanout~", strip it for lookup.
+        nkey = key.split('fanout~')[1:][0]
+        if not self._ring_has(nkey):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (nkey, )
+            )
+            return []
+        return map(lambda x: (key + '.' + x, x), self.ring[nkey])
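+
+
+# Concrete lookups (illustrative): with ring = {"scheduler": ["host1",
+# "host2"]}, the exchanges above resolve keys as follows:
+#
+#     RoundRobinRingExchange(ring).run("scheduler")
+#         -> [("scheduler.host1", "host1")]      (host2 on the next call)
+#     FanoutRingExchange(ring).run("fanout~scheduler")
+#         -> [("fanout~scheduler.host1", "host1"),
+#             ("fanout~scheduler.host2", "host2")]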
+ i.e. "compute.host" sends a message to "compute" running on "host" + """ + def __init__(self): + super(Exchange, self).__init__() + + def run(self, key): + b, e = key.split('.', 1) + return [(b, e)] + + +class MatchMakerRing(MatchMakerBase): + """ + Match Maker where hosts are loaded from a static hashmap. + """ + def __init__(self, ring=None): + super(MatchMakerRing, self).__init__() + self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) + self.add_binding(DirectBinding(), DirectExchange()) + self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) + + +class MatchMakerLocalhost(MatchMakerBase): + """ + Match Maker where all bare topics resolve to localhost. + Useful for testing. + """ + def __init__(self): + super(MatchMakerLocalhost, self).__init__() + self.add_binding(FanoutBinding(), LocalhostExchange()) + self.add_binding(DirectBinding(), DirectExchange()) + self.add_binding(TopicBinding(), LocalhostExchange()) + + +class MatchMakerStub(MatchMakerBase): + """ + Match Maker where topics are untouched. + Useful for testing, or for AMQP/brokered queues. + Will not work where knowledge of hosts is known (i.e. zeromq) + """ + def __init__(self): + super(MatchMakerLocalhost, self).__init__() + + self.add_binding(FanoutBinding(), StubExchange()) + self.add_binding(DirectBinding(), StubExchange()) + self.add_binding(TopicBinding(), StubExchange()) diff --git a/cloudbaseinit/openstack/common/rpc/proxy.py b/cloudbaseinit/openstack/common/rpc/proxy.py new file mode 100644 index 00000000..5ace7da1 --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/proxy.py @@ -0,0 +1,165 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A helper class for proxy objects to remote APIs. + +For more information about rpc API version numbers, see: + rpc/dispatcher.py +""" + + +from cloudbaseinit.openstack.common import rpc + + +class RpcProxy(object): + """A helper class for rpc clients. + + This class is a wrapper around the RPC client API. It allows you to + specify the topic and API version in a single place. This is intended to + be used as a base class for a class that implements the client side of an + rpc API. + """ + + def __init__(self, topic, default_version): + """Initialize an RpcProxy. + + :param topic: The topic to use for all messages. + :param default_version: The default API version to request in all + outgoing messages. This can be overridden on a per-message + basis. + """ + self.topic = topic + self.default_version = default_version + super(RpcProxy, self).__init__() + + def _set_version(self, msg, vers): + """Helper method to set the version in a message. + + :param msg: The message having a version added to it. + :param vers: The version number to add to the message. 
+ """ + msg['version'] = vers if vers else self.default_version + + def _get_topic(self, topic): + """Return the topic to use for a message.""" + return topic if topic else self.topic + + @staticmethod + def make_msg(method, **kwargs): + return {'method': method, 'args': kwargs} + + def call(self, context, msg, topic=None, version=None, timeout=None): + """rpc.call() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param timeout: (Optional) A timeout to use when waiting for the + response. If no timeout is specified, a default timeout will be + used that is usually sufficient. + :param version: (Optional) Override the requested API version in this + message. + + :returns: The return value from the remote method. + """ + self._set_version(msg, version) + return rpc.call(context, self._get_topic(topic), msg, timeout) + + def multicall(self, context, msg, topic=None, version=None, timeout=None): + """rpc.multicall() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param timeout: (Optional) A timeout to use when waiting for the + response. If no timeout is specified, a default timeout will be + used that is usually sufficient. + :param version: (Optional) Override the requested API version in this + message. + + :returns: An iterator that lets you process each of the returned values + from the remote method as they arrive. + """ + self._set_version(msg, version) + return rpc.multicall(context, self._get_topic(topic), msg, timeout) + + def cast(self, context, msg, topic=None, version=None): + """rpc.cast() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.cast() does not wait on any return value from the + remote method. + """ + self._set_version(msg, version) + rpc.cast(context, self._get_topic(topic), msg) + + def fanout_cast(self, context, msg, topic=None, version=None): + """rpc.fanout_cast() a remote method. + + :param context: The request context + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.fanout_cast() does not wait on any return value + from the remote method. + """ + self._set_version(msg, version) + rpc.fanout_cast(context, self._get_topic(topic), msg) + + def cast_to_server(self, context, server_params, msg, topic=None, + version=None): + """rpc.cast_to_server() a remote method. + + :param context: The request context + :param server_params: Server parameters. See rpc.cast_to_server() for + details. + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.cast_to_server() does not wait on any + return values. 
+ """ + self._set_version(msg, version) + rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) + + def fanout_cast_to_server(self, context, server_params, msg, topic=None, + version=None): + """rpc.fanout_cast_to_server() a remote method. + + :param context: The request context + :param server_params: Server parameters. See rpc.cast_to_server() for + details. + :param msg: The message to send, including the method and args. + :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. + + :returns: None. rpc.fanout_cast_to_server() does not wait on any + return values. + """ + self._set_version(msg, version) + rpc.fanout_cast_to_server(context, server_params, + self._get_topic(topic), msg) diff --git a/cloudbaseinit/openstack/common/rpc/service.py b/cloudbaseinit/openstack/common/rpc/service.py new file mode 100644 index 00000000..af2e2493 --- /dev/null +++ b/cloudbaseinit/openstack/common/rpc/service.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cloudbaseinit.openstack.common.gettextutils import _ +from cloudbaseinit.openstack.common import log as logging +from cloudbaseinit.openstack.common import rpc +from cloudbaseinit.openstack.common.rpc import dispatcher as rpc_dispatcher +from cloudbaseinit.openstack.common import service + + +LOG = logging.getLogger(__name__) + + +class Service(service.Service): + """Service object for binaries running on hosts. + + A service enables rpc by listening to queues based on topic and host.""" + def __init__(self, host, topic, manager=None): + super(Service, self).__init__() + self.host = host + self.topic = topic + if manager is None: + self.manager = self + else: + self.manager = manager + + def start(self): + super(Service, self).start() + + self.conn = rpc.create_connection(new=True) + LOG.debug(_("Creating Consumer connection for Service %s") % + self.topic) + + dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, dispatcher, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, dispatcher, fanout=False) + + self.conn.create_consumer(self.topic, dispatcher, fanout=True) + + # Hook to allow the manager to do other initializations after + # the rpc connection is created. + if callable(getattr(self.manager, 'initialize_service_hook', None)): + self.manager.initialize_service_hook(self) + + # Consume from all consumers in a thread + self.conn.consume_in_thread() + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. 
as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + super(Service, self).stop() diff --git a/cloudbaseinit/openstack/common/setup.py b/cloudbaseinit/openstack/common/setup.py new file mode 100644 index 00000000..e6f72f03 --- /dev/null +++ b/cloudbaseinit/openstack/common/setup.py @@ -0,0 +1,366 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utilities with minimum-depends for use in setup.py +""" + +import datetime +import os +import re +import subprocess +import sys + +from setuptools.command import sdist + + +def parse_mailmap(mailmap='.mailmap'): + mapping = {} + if os.path.exists(mailmap): + with open(mailmap, 'r') as fp: + for l in fp: + l = l.strip() + if not l.startswith('#') and ' ' in l: + canonical_email, alias = [x for x in l.split(' ') + if x.startswith('<')] + mapping[alias] = canonical_email + return mapping + + +def canonicalize_emails(changelog, mapping): + """Takes in a string and an email alias mapping and replaces all + instances of the aliases in the string with their real email. + """ + for alias, email in mapping.iteritems(): + changelog = changelog.replace(alias, email) + return changelog + + +# Get requirements from the first file that exists +def get_reqs_from_files(requirements_files): + for requirements_file in requirements_files: + if os.path.exists(requirements_file): + with open(requirements_file, 'r') as fil: + return fil.read().split('\n') + return [] + + +def parse_requirements(requirements_files=['requirements.txt', + 'tools/pip-requires']): + requirements = [] + for line in get_reqs_from_files(requirements_files): + # For the requirements list, we need to inject only the portion + # after egg= so that distutils knows the package it's looking for + # such as: + # -e git://github.com/openstack/nova/master#egg=nova + if re.match(r'\s*-e\s+', line): + requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', + line)) + # such as: + # http://github.com/openstack/nova/zipball/master#egg=nova + elif re.match(r'\s*https?:', line): + requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1', + line)) + # -f lines are for index locations, and don't get used here + elif re.match(r'\s*-f\s+', line): + pass + # argparse is part of the standard library starting with 2.7 + # adding it to the requirements list screws distro installs + elif line == 'argparse' and sys.version_info >= (2, 7): + pass + else: + requirements.append(line) + + return requirements + + +def parse_dependency_links(requirements_files=['requirements.txt', + 'tools/pip-requires']): + dependency_links = [] + # dependency_links inject alternate locations to find packages listed + # in requirements + for line in get_reqs_from_files(requirements_files): + # skip comments and blank lines + if re.match(r'(\s*#)|(\s*$)', line): + continue + # lines with -e or -f need the whole line, minus the flag + if re.match(r'\s*-[ef]\s+', line): + 
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line)) + # lines that are only urls can go in unmolested + elif re.match(r'\s*https?:', line): + dependency_links.append(line) + return dependency_links + + +def write_requirements(): + venv = os.environ.get('VIRTUAL_ENV', None) + if venv is not None: + with open("requirements.txt", "w") as req_file: + output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"], + stdout=subprocess.PIPE) + requirements = output.communicate()[0].strip() + req_file.write(requirements) + + +def _run_shell_command(cmd): + if os.name == 'nt': + output = subprocess.Popen(["cmd.exe", "/C", cmd], + stdout=subprocess.PIPE) + else: + output = subprocess.Popen(["/bin/sh", "-c", cmd], + stdout=subprocess.PIPE) + out = output.communicate() + if len(out) == 0: + return None + if len(out[0].strip()) == 0: + return None + return out[0].strip() + + +def _get_git_next_version_suffix(branch_name): + datestamp = datetime.datetime.now().strftime('%Y%m%d') + if branch_name == 'milestone-proposed': + revno_prefix = "r" + else: + revno_prefix = "" + _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*") + milestone_cmd = "git show meta/openstack/release:%s" % branch_name + milestonever = _run_shell_command(milestone_cmd) + if milestonever: + first_half = "%s~%s" % (milestonever, datestamp) + else: + first_half = datestamp + + post_version = _get_git_post_version() + # post version should look like: + # 0.1.1.4.gcc9e28a + # where the bit after the last . is the short sha, and the bit between + # the last and second to last is the revno count + (revno, sha) = post_version.split(".")[-2:] + second_half = "%s%s.%s" % (revno_prefix, revno, sha) + return ".".join((first_half, second_half)) + + +def _get_git_current_tag(): + return _run_shell_command("git tag --contains HEAD") + + +def _get_git_tag_info(): + return _run_shell_command("git describe --tags") + + +def _get_git_post_version(): + current_tag = _get_git_current_tag() + if current_tag is not None: + return current_tag + else: + tag_info = _get_git_tag_info() + if tag_info is None: + base_version = "0.0" + cmd = "git --no-pager log --oneline" + out = _run_shell_command(cmd) + revno = len(out.split("\n")) + sha = _run_shell_command("git describe --always") + else: + tag_infos = tag_info.split("-") + base_version = "-".join(tag_infos[:-2]) + (revno, sha) = tag_infos[-2:] + return "%s.%s.%s" % (base_version, revno, sha) + + +def write_git_changelog(): + """Write a changelog based on the git changelog.""" + new_changelog = 'ChangeLog' + if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'): + if os.path.isdir('.git'): + git_log_cmd = 'git log --stat' + changelog = _run_shell_command(git_log_cmd) + mailmap = parse_mailmap() + with open(new_changelog, "w") as changelog_file: + changelog_file.write(canonicalize_emails(changelog, mailmap)) + else: + open(new_changelog, 'w').close() + + +def generate_authors(): + """Create AUTHORS file using git commits.""" + jenkins_email = 'jenkins@review.(openstack|stackforge).org' + old_authors = 'AUTHORS.in' + new_authors = 'AUTHORS' + if not os.getenv('SKIP_GENERATE_AUTHORS'): + if os.path.isdir('.git'): + # don't include jenkins email address in AUTHORS file + git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | " + "egrep -v '" + jenkins_email + "'") + changelog = _run_shell_command(git_log_cmd) + mailmap = parse_mailmap() + with open(new_authors, 'w') as new_authors_fh: + new_authors_fh.write(canonicalize_emails(changelog, mailmap)) + if os.path.exists(old_authors): + with 
open(old_authors, "r") as old_authors_fh: + new_authors_fh.write('\n' + old_authors_fh.read()) + else: + open(new_authors, 'w').close() + + +_rst_template = """%(heading)s +%(underline)s + +.. automodule:: %(module)s + :members: + :undoc-members: + :show-inheritance: +""" + + +def read_versioninfo(project): + """Read the versioninfo file. If it doesn't exist, we're in a github + zipball, and there's really no way to know what version we really + are, but that should be ok, because the utility of that should be + just about nil if this code path is in use in the first place.""" + versioninfo_path = os.path.join(project, 'versioninfo') + if os.path.exists(versioninfo_path): + with open(versioninfo_path, 'r') as vinfo: + version = vinfo.read().strip() + else: + version = "0.0.0" + return version + + +def write_versioninfo(project, version): + """Write a simple file containing the version of the package.""" + with open(os.path.join(project, 'versioninfo'), 'w') as fil: + fil.write("%s\n" % version) + + +def get_cmdclass(): + """Return dict of commands to run from setup.py.""" + + cmdclass = dict() + + def _find_modules(arg, dirname, files): + for filename in files: + if filename.endswith('.py') and filename != '__init__.py': + arg["%s.%s" % (dirname.replace('/', '.'), + filename[:-3])] = True + + class LocalSDist(sdist.sdist): + """Builds the ChangeLog and Authors files from VC first.""" + + def run(self): + write_git_changelog() + generate_authors() + # sdist.sdist is an old style class, can't use super() + sdist.sdist.run(self) + + cmdclass['sdist'] = LocalSDist + + # If Sphinx is installed on the box running setup.py, + # enable setup.py to build the documentation, otherwise, + # just ignore it + try: + from sphinx.setup_command import BuildDoc + + class LocalBuildDoc(BuildDoc): + def generate_autoindex(self): + print "**Autodocumenting from %s" % os.path.abspath(os.curdir) + modules = {} + option_dict = self.distribution.get_option_dict('build_sphinx') + source_dir = os.path.join(option_dict['source_dir'][1], 'api') + if not os.path.exists(source_dir): + os.makedirs(source_dir) + for pkg in self.distribution.packages: + if '.' not in pkg: + os.path.walk(pkg, _find_modules, modules) + module_list = modules.keys() + module_list.sort() + autoindex_filename = os.path.join(source_dir, 'autoindex.rst') + with open(autoindex_filename, 'w') as autoindex: + autoindex.write(""".. 
toctree:: + :maxdepth: 1 + +""") + for module in module_list: + output_filename = os.path.join(source_dir, + "%s.rst" % module) + heading = "The :mod:`%s` Module" % module + underline = "=" * len(heading) + values = dict(module=module, heading=heading, + underline=underline) + + print "Generating %s" % output_filename + with open(output_filename, 'w') as output_file: + output_file.write(_rst_template % values) + autoindex.write(" %s.rst\n" % module) + + def run(self): + if not os.getenv('SPHINX_DEBUG'): + self.generate_autoindex() + + for builder in ['html', 'man']: + self.builder = builder + self.finalize_options() + self.project = self.distribution.get_name() + self.version = self.distribution.get_version() + self.release = self.distribution.get_version() + BuildDoc.run(self) + cmdclass['build_sphinx'] = LocalBuildDoc + except ImportError: + pass + + return cmdclass + + +def get_git_branchname(): + for branch in _run_shell_command("git branch --color=never").split("\n"): + if branch.startswith('*'): + _branch_name = branch.split()[1].strip() + if _branch_name == "(no": + _branch_name = "no-branch" + return _branch_name + + +def get_pre_version(projectname, base_version): + """Return a version which is leading up to a version that will + be released in the future.""" + if os.path.isdir('.git'): + current_tag = _get_git_current_tag() + if current_tag is not None: + version = current_tag + else: + branch_name = os.getenv('BRANCHNAME', + os.getenv('GERRIT_REFNAME', + get_git_branchname())) + version_suffix = _get_git_next_version_suffix(branch_name) + version = "%s~%s" % (base_version, version_suffix) + write_versioninfo(projectname, version) + return version + else: + version = read_versioninfo(projectname) + return version + + +def get_post_version(projectname): + """Return a version which is equal to the tag that's on the current + revision if there is one, or tag plus number of additional revisions + if the current revision has no tag.""" + + if os.path.isdir('.git'): + version = _get_git_post_version() + write_versioninfo(projectname, version) + return version + return read_versioninfo(projectname) diff --git a/cloudbaseinit/openstack/common/timeutils.py b/cloudbaseinit/openstack/common/timeutils.py new file mode 100644 index 00000000..ea691642 --- /dev/null +++ b/cloudbaseinit/openstack/common/timeutils.py @@ -0,0 +1,160 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
+""" + +import calendar +import datetime + +import iso8601 + + +TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" +PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" + + +def isotime(at=None): + """Stringify time in ISO 8601 format""" + if not at: + at = utcnow() + str = at.strftime(TIME_FORMAT) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + str += ('Z' if tz == 'UTC' else tz) + return str + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(e.message) + except TypeError as e: + raise ValueError(e.message) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +utcnow.override_time = None + + +def set_time_override(override_time=datetime.datetime.utcnow()): + """ + Override utils.utcnow to return a constant time or a list thereof, + one at a time. + """ + utcnow.override_time = override_time + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times.""" + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """ + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). 
+ """ + delta = after - before + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) diff --git a/cloudbaseinit/openstack/common/uuidutils.py b/cloudbaseinit/openstack/common/uuidutils.py new file mode 100644 index 00000000..7608acb9 --- /dev/null +++ b/cloudbaseinit/openstack/common/uuidutils.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +UUID related utilities and helper functions. +""" + +import uuid + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. + + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False diff --git a/cloudbaseinit/osutils/__init__.py b/cloudbaseinit/osutils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloudbaseinit/osutils/base.py b/cloudbaseinit/osutils/base.py new file mode 100644 index 00000000..4a35c8d5 --- /dev/null +++ b/cloudbaseinit/osutils/base.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudbase Solutions Srl +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
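A worked example may help clarify how the git-based version helpers in openstack/common/setup.py above compose a post-version string. The helper is re-sketched standalone for illustration; the tag, revision count and SHA are hypothetical values, not taken from this repository.

# Standalone sketch of the _get_git_post_version() logic shown above.
# The 'git describe --tags' sample output is hypothetical.
def compose_post_version(tag_info):
    # 'git describe --tags' yields '<base>-<revno>-g<sha>', e.g.
    # '0.1-4-gcc9e28a' means 4 commits past tag '0.1' at commit cc9e28a.
    tag_infos = tag_info.split("-")
    base_version = "-".join(tag_infos[:-2])
    (revno, sha) = tag_infos[-2:]
    return "%s.%s.%s" % (base_version, revno, sha)

print compose_post_version("0.1-4-gcc9e28a")  # -> 0.1.4.gcc9e28a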
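The override hooks at the bottom of timeutils exist mainly so tests can freeze or step the clock. A minimal usage sketch, assuming the module path used in this tree:

import datetime

from cloudbaseinit.openstack.common import timeutils

# Freeze "now" at a fixed instant so time-dependent code under test
# becomes deterministic.
fixed = datetime.datetime(2012, 12, 1, 23, 50, 15)
timeutils.set_time_override(fixed)
assert timeutils.utcnow() == fixed

# Step the frozen clock forward and measure the difference.
timeutils.advance_time_seconds(30)
assert timeutils.delta_seconds(fixed, timeutils.utcnow()) == 30.0

# Restore the real clock.
timeutils.clear_time_override()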
+
+import subprocess
+
+class BaseOSUtils(object):
+    def reboot(self):
+        pass
+
+    def user_exists(self, username):
+        pass
+
+    def execute_process(self, args, shell=True):
+        p = subprocess.Popen(args,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             shell=shell)
+        (out, err) = p.communicate()
+        return (out, err, p.returncode)
+
+    def sanitize_shell_input(self, value):
+        pass
+
+    def create_user(self, username, password, password_expires=False):
+        pass
+
+    def set_user_password(self, username, password, password_expires=False):
+        pass
+
+    def add_user_to_local_group(self, username, groupname):
+        pass
+
+    def set_host_name(self, new_host_name):
+        pass
+
+    def get_user_home(self, username):
+        pass
+
+    def get_network_adapters(self):
+        pass
+
+    def set_static_network_config(self, adapter_name, address, netmask,
+                                  broadcast, gateway, dnsnameservers):
+        pass
diff --git a/cloudbaseinit/osutils/factory.py b/cloudbaseinit/osutils/factory.py
new file mode 100644
index 00000000..d97a3e44
--- /dev/null
+++ b/cloudbaseinit/osutils/factory.py
@@ -0,0 +1,29 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from cloudbaseinit.utils import *
+
+class OSUtilsFactory(object):
+    def get_os_utils(self):
+        osutils_class_paths = {
+            'nt': 'cloudbaseinit.osutils.windows.WindowsUtils',
+            'posix': 'cloudbaseinit.osutils.posix.PosixUtils'
+        }
+
+        utils = Utils()
+        return utils.load_class(osutils_class_paths[os.name])()
diff --git a/cloudbaseinit/osutils/posix.py b/cloudbaseinit/osutils/posix.py
new file mode 100644
index 00000000..8c11c82d
--- /dev/null
+++ b/cloudbaseinit/osutils/posix.py
@@ -0,0 +1,23 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from cloudbaseinit.osutils.base import *
+
+class PosixUtils(BaseOSUtils):
+    def reboot(self):
+        os.system('reboot')
diff --git a/cloudbaseinit/osutils/windows.py b/cloudbaseinit/osutils/windows.py
new file mode 100644
index 00000000..8328a524
--- /dev/null
+++ b/cloudbaseinit/osutils/windows.py
@@ -0,0 +1,246 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import _winreg +import ctypes +import logging +import wmi + +from ctypes import windll +from ctypes import wintypes + +from cloudbaseinit.osutils.base import * + +advapi32 = windll.advapi32 +kernel32 = windll.kernel32 +netapi32 = windll.netapi32 +userenv = windll.userenv + +LOG = logging.getLogger(__name__) + + +class Win32_PROFILEINFO(ctypes.Structure): + _fields_ = [ + ('dwSize', wintypes.DWORD), + ('dwFlags', wintypes.DWORD), + ('lpUserName', wintypes.LPWSTR), + ('lpProfilePath', wintypes.LPWSTR), + ('lpDefaultPath', wintypes.LPWSTR), + ('lpServerName', wintypes.LPWSTR), + ('lpPolicyPath', wintypes.LPWSTR), + ('hprofile', wintypes.HANDLE) + ] + + +class Win32_LOCALGROUP_MEMBERS_INFO_3(ctypes.Structure): + _fields_ = [ + ('lgrmi3_domainandname', wintypes.LPWSTR) + ] + + +class WindowsUtils(BaseOSUtils): + NERR_GroupNotFound = 2220 + ERROR_ACCESS_DENIED = 5 + ERROR_NO_SUCH_MEMBER = 1387 + ERROR_MEMBER_IN_ALIAS = 1378 + ERROR_INVALID_MEMBER = 1388 + + def reboot(self): + conn = wmi.WMI(moniker='//./root/cimv2') + conn.Win32_OperatingSystem()[0].Reboot() + + def _get_user_wmi_object(self, username): + conn = wmi.WMI(moniker='//./root/cimv2') + username_san = self._sanitize_wmi_input(username) + q = conn.query('SELECT * FROM Win32_Account where name = \'%(username_san)s\'' % locals()) + if len(q) > 0: + return q[0] + return None + + def user_exists(self, username): + return self._get_user_wmi_object(username) != None + + def _create_or_change_user(self, username, password, create, password_expires): + username_san = self.sanitize_shell_input(username) + password_san = self.sanitize_shell_input(password) + + args = ['NET', 'USER', username_san, password_san] + if create: + args.append('/ADD') + + (out, err, ret_val) = self.execute_process(args) + if not ret_val: + self._set_user_password_expiration(username, password_expires) + + return ret_val == 0 + + def _sanitize_wmi_input(self, value): + return value.replace('\'', '\'\'') + + def _set_user_password_expiration(self, username, password_expires): + r = self._get_user_wmi_object(username) + if not r: + return False + r.PasswordExpires = password_expires + r.Put_() + return True + + def create_user(self, username, password, password_expires=False): + if not self._create_or_change_user(username, password, True, + password_expires): + raise Exception("Create user failed") + + def set_user_password(self, username, password, password_expires=False): + if not self._create_or_change_user(username, password, False, + password_expires): + raise Exception("Set user password failed") + + def _get_user_sid_and_domain(self, username): + sid = ctypes.create_string_buffer(1024) + cbSid = wintypes.DWORD(ctypes.sizeof(sid)) + domainName = ctypes.create_unicode_buffer(1024) + cchReferencedDomainName = wintypes.DWORD( + ctypes.sizeof(domainName) / ctypes.sizeof(wintypes.WCHAR)) + sidNameUse = wintypes.DWORD() + + ret_val = advapi32.LookupAccountNameW(0, unicode(username), sid, + ctypes.byref(cbSid), domainName, + ctypes.byref(cchReferencedDomainName), + ctypes.byref(sidNameUse)) + if not ret_val: + raise Exception("Cannot get user SID") + + return 
(sid, domainName.value)
+
+    def add_user_to_local_group(self, username, groupname):
+
+        lmi = Win32_LOCALGROUP_MEMBERS_INFO_3()
+        lmi.lgrmi3_domainandname = unicode(username)
+
+        ret_val = netapi32.NetLocalGroupAddMembers(0, unicode(groupname), 3,
+                                                   ctypes.addressof(lmi), 1)
+
+        if ret_val == self.NERR_GroupNotFound:
+            raise Exception('Group not found')
+        elif ret_val == self.ERROR_ACCESS_DENIED:
+            raise Exception('Access denied')
+        elif ret_val == self.ERROR_NO_SUCH_MEMBER:
+            raise Exception('Username not found')
+        elif ret_val == self.ERROR_MEMBER_IN_ALIAS:
+            # The user is already a member of the group
+            pass
+        elif ret_val == self.ERROR_INVALID_MEMBER:
+            raise Exception('Invalid user')
+        elif ret_val != 0:
+            raise Exception('Unknown error')
+
+    def get_user_sid(self, username):
+        r = self._get_user_wmi_object(username)
+        if not r:
+            return None
+        return r.SID
+
+    def create_user_logon_session(self, username, password, domain='.',
+                                  load_profile=True):
+        token = wintypes.HANDLE()
+        ret_val = advapi32.LogonUserW(unicode(username), unicode(domain),
+                                      unicode(password), 2, 0,
+                                      ctypes.byref(token))
+        if not ret_val:
+            raise Exception("User logon failed")
+
+        if load_profile:
+            pi = Win32_PROFILEINFO()
+            pi.dwSize = ctypes.sizeof(Win32_PROFILEINFO)
+            pi.lpUserName = unicode(username)
+            ret_val = userenv.LoadUserProfileW(token, ctypes.byref(pi))
+            if not ret_val:
+                kernel32.CloseHandle(token)
+                raise Exception("Cannot load user profile")
+
+        return token
+
+    def close_user_logon_session(self, token):
+        kernel32.CloseHandle(token)
+
+    def get_user_home(self, username):
+        user_sid = self.get_user_sid(username)
+        if user_sid:
+            with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
+                                 'SOFTWARE\\Microsoft\\Windows NT\\'
+                                 'CurrentVersion\\ProfileList\\%s'
+                                 % user_sid) as key:
+                return _winreg.QueryValueEx(key, 'ProfileImagePath')[0]
+        LOG.debug('Home directory not found for user \'%s\'' % username)
+        return None
+
+    def sanitize_shell_input(self, value):
+        return value.replace('"', '\\"')
+
+    def set_host_name(self, new_host_name):
+        conn = wmi.WMI(moniker='//./root/cimv2')
+        comp = conn.Win32_ComputerSystem()[0]
+        if comp.Name != new_host_name:
+            comp.Rename(new_host_name, None, None)
+            return True
+        else:
+            return False
+
+    def check_powershell_exec_policy(self):
+        LOG.debug('Check Powershell execution policy')
+        args = ['powershell.exe',
+                'if ((Get-ExecutionPolicy).value__ -gt 1) '
+                '{ set-executionpolicy RemoteSigned}']
+        self.execute_process(args, False)
+
+    def get_network_adapters(self):
+        adapters = []
+        conn = wmi.WMI(moniker='//./root/cimv2')
+        # Get Ethernet adapters only
+        q = conn.query('SELECT * FROM Win32_NetworkAdapter '
+                       'where AdapterTypeId = 0')
+        for r in q:
+            adapters.append(r.Name)
+        return adapters
+
+    def set_static_network_config(self, adapter_name, address, netmask,
+                                  broadcast, gateway, dnsnameservers):
+        conn = wmi.WMI(moniker='//./root/cimv2')
+
+        adapter_name_san = self._sanitize_wmi_input(adapter_name)
+        q = conn.query('SELECT * FROM Win32_NetworkAdapter '
+                       'where Name = \'%(adapter_name_san)s\'' % locals())
+        if not len(q):
+            raise Exception("Network adapter not found")
+
+        adapter_config = q[0].associators(
+            wmi_result_class='Win32_NetworkAdapterConfiguration')[0]
+
+        LOG.debug("Setting static IP address")
+        (ret_val,) = adapter_config.EnableStatic([address], [netmask])
+        if ret_val > 1:
+            raise Exception("Cannot set static IP address on network adapter")
+        reboot_required = (ret_val == 1)
+
+        LOG.debug("Setting static gateways")
+        (ret_val,) = adapter_config.SetGateways([gateway], [1])
+        if
ret_val > 1: + raise Exception("Cannot set gateway on network adapter") + reboot_required = reboot_required or ret_val == 1 + + LOG.debug("Setting static DNS servers") + (ret_val,) = adapter_config.SetDNSServerSearchOrder(dnsnameservers) + if ret_val > 1: + raise Exception("Cannot set DNS on network adapter") + reboot_required = reboot_required or ret_val == 1 + + return reboot_required \ No newline at end of file diff --git a/cloudbaseinit/plugins/__init__.py b/cloudbaseinit/plugins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cloudbaseinit/plugins/base.py b/cloudbaseinit/plugins/base.py new file mode 100644 index 00000000..da2cf61c --- /dev/null +++ b/cloudbaseinit/plugins/base.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudbase Solutions Srl +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class BasePlugin(object): + def execute(self, service, config): + pass diff --git a/cloudbaseinit/plugins/factory.py b/cloudbaseinit/plugins/factory.py new file mode 100644 index 00000000..27b35651 --- /dev/null +++ b/cloudbaseinit/plugins/factory.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudbase Solutions Srl +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cloudbaseinit.openstack.common import cfg +from cloudbaseinit.utils import * + +opts = [ + cfg.ListOpt('plugins', default=[ + 'cloudbaseinit.plugins.windows.sethostname.SetHostNamePlugin', + 'cloudbaseinit.plugins.windows.createuser.CreateUserPlugin', + 'cloudbaseinit.plugins.windows.networkconfig.NetworkConfigPlugin', + 'cloudbaseinit.plugins.windows.sshpublickeys.' 
+        'SetUserSSHPublicKeysPlugin',
+        'cloudbaseinit.plugins.windows.userdata.UserDataPlugin'
+        ],
+        help='List of enabled plugin classes, '
+             'to be executed in the provided order'),
+    ]
+
+CONF = cfg.CONF
+CONF.register_opts(opts)
+
+
+class PluginFactory(object):
+    def load_plugins(self):
+        plugins = []
+        utils = Utils()
+        for class_path in CONF.plugins:
+            plugins.append(utils.load_class(class_path)())
+        return plugins
diff --git a/cloudbaseinit/plugins/windows/__init__.py b/cloudbaseinit/plugins/windows/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cloudbaseinit/plugins/windows/createuser.py b/cloudbaseinit/plugins/windows/createuser.py
new file mode 100644
index 00000000..b4baf544
--- /dev/null
+++ b/cloudbaseinit/plugins/windows/createuser.py
@@ -0,0 +1,77 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import uuid
+
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.osutils.factory import *
+from cloudbaseinit.plugins.base import *
+
+opts = [
+    cfg.StrOpt('username', default='Admin',
+               help='User to be added to the system or updated if already '
+                    'existing'),
+    cfg.ListOpt('groups', default=['Administrators'],
+                help='List of local groups to which the user specified '
+                     'in \'username\' will be added'),
+    cfg.BoolOpt('inject_user_password', default=True,
+                help='Set the password provided in the configuration. '
+                     'If False or no password is provided, a random one '
+                     'will be set'),
+    ]
+
+CONF = cfg.CONF
+CONF.register_opts(opts)
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateUserPlugin(BasePlugin):
+    def execute(self, service):
+        username = CONF.username
+
+        meta_data = service.get_meta_data('openstack')
+        if 'admin_pass' in meta_data and CONF.inject_user_password:
+            password = meta_data['admin_pass']
+        else:
+            password = None
+
+        osutils = OSUtilsFactory().get_os_utils()
+        if not osutils.user_exists(username):
+            if not password:
+                # Generate a random password
+                # Limit to 14 chars for compatibility with NT
+                LOG.debug("Generating a random password")
+                password = str(uuid.uuid4()).replace('-', '')[:14]
+
+            osutils.create_user(username, password)
+        else:
+            if password:
+                osutils.set_user_password(username, password)
+
+        if password:
+            # Create a user profile in order for other plugins
+            # to access the user home, etc
+            token = osutils.create_user_logon_session(username, password,
+                                                      load_profile=True)
+            osutils.close_user_logon_session(token)
+
+        for group in CONF.groups:
+            try:
+                osutils.add_user_to_local_group(username, group)
+            except Exception, ex:
+                LOG.error('Cannot add user to group \'%(group)s\'. '
+                          'Error: %(ex)s' % locals())
+
+        return False
diff --git a/cloudbaseinit/plugins/windows/networkconfig.py b/cloudbaseinit/plugins/windows/networkconfig.py
new file mode 100644
index 00000000..44bac959
--- /dev/null
+++ b/cloudbaseinit/plugins/windows/networkconfig.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import re
+
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.osutils.factory import *
+from cloudbaseinit.plugins.base import *
+
+LOG = logging.getLogger(__name__)
+
+opts = [
+    cfg.StrOpt('network_adapter', default=None,
+               help='Network adapter to configure. If not specified, the '
+                    'first available ethernet adapter will be chosen'),
+    ]
+
+CONF = cfg.CONF
+CONF.register_opts(opts)
+
+
+class NetworkConfigPlugin(BasePlugin):
+    def execute(self, service):
+        meta_data = service.get_meta_data('openstack')
+        if 'network_config' not in meta_data:
+            return False
+        network_config = meta_data['network_config']
+        if 'content_path' not in network_config:
+            return False
+
+        content_path = network_config['content_path']
+        content_name = content_path.rsplit('/', 1)[-1]
+        debian_network_conf = service.get_content('openstack', content_name)
+
+        LOG.debug('network config content:\n%s' % debian_network_conf)
+
+        # TODO (alexpilotti): implement a proper grammar
+        m = re.search(r'iface eth0 inet static\s+'
+                      r'address\s+(?P<address>[^\s]+)\s+'
+                      r'netmask\s+(?P<netmask>[^\s]+)\s+'
+                      r'broadcast\s+(?P<broadcast>[^\s]+)\s+'
+                      r'gateway\s+(?P<gateway>[^\s]+)\s+'
+                      r'dns\-nameservers\s+(?P<dnsnameservers>[^\r\n]+)\s+',
+                      debian_network_conf)
+        if not m:
+            raise Exception("network_config format not recognized")
+
+        address = m.group('address')
+        netmask = m.group('netmask')
+        broadcast = m.group('broadcast')
+        gateway = m.group('gateway')
+        dnsnameservers = m.group('dnsnameservers').strip().split(' ')
+
+        osutils = OSUtilsFactory().get_os_utils()
+
+        network_adapter_name = CONF.network_adapter
+        if not network_adapter_name:
+            # Get the first available one
+            available_adapters = osutils.get_network_adapters()
+            if not len(available_adapters):
+                raise Exception("No network adapter available")
+            network_adapter_name = available_adapters[0]
+
+        LOG.info('Configuring network adapter: \'%s\'' % network_adapter_name)
+
+        reboot_required = osutils.set_static_network_config(
+            network_adapter_name, address, netmask, broadcast,
+            gateway, dnsnameservers)
+
+        return reboot_required
diff --git a/cloudbaseinit/plugins/windows/sethostname.py b/cloudbaseinit/plugins/windows/sethostname.py
new file mode 100644
index 00000000..45f00da4
--- /dev/null
+++ b/cloudbaseinit/plugins/windows/sethostname.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from cloudbaseinit.osutils.factory import *
+from cloudbaseinit.plugins.base import *
+
+LOG = logging.getLogger(__name__)
+
+
+class SetHostNamePlugin(BasePlugin):
+    def execute(self, service):
+        meta_data = service.get_meta_data('openstack')
+        if 'hostname' not in meta_data:
+            LOG.debug('Hostname not found in metadata')
+            return False
+
+        osutils = OSUtilsFactory().get_os_utils()
+
+        new_host_name = meta_data['hostname'].split('.', 1)[0]
+        return osutils.set_host_name(new_host_name)
+
diff --git a/cloudbaseinit/plugins/windows/sshpublickeys.py b/cloudbaseinit/plugins/windows/sshpublickeys.py
new file mode 100644
index 00000000..49b62ba2
--- /dev/null
+++ b/cloudbaseinit/plugins/windows/sshpublickeys.py
@@ -0,0 +1,52 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
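To make the network_config regular expression above concrete, here is the kind of Debian-style interfaces snippet the plugin expects, parsed with the same pattern; the addresses in the sample are hypothetical:

import re

sample = ("iface eth0 inet static\n"
          "    address 10.0.0.10\n"
          "    netmask 255.255.255.0\n"
          "    broadcast 10.0.0.255\n"
          "    gateway 10.0.0.1\n"
          "    dns-nameservers 8.8.8.8 8.8.4.4\n")

# Same pattern as NetworkConfigPlugin above, applied to the sample.
m = re.search(r'iface eth0 inet static\s+'
              r'address\s+(?P<address>[^\s]+)\s+'
              r'netmask\s+(?P<netmask>[^\s]+)\s+'
              r'broadcast\s+(?P<broadcast>[^\s]+)\s+'
              r'gateway\s+(?P<gateway>[^\s]+)\s+'
              r'dns\-nameservers\s+(?P<dnsnameservers>[^\r\n]+)\s+', sample)

print m.group('address')                            # 10.0.0.10
print m.group('dnsnameservers').strip().split(' ')  # ['8.8.8.8', '8.8.4.4']

The extracted groups then flow, unchanged, into osutils.set_static_network_config(), with the adapter name taken from CONF.network_adapter or the first Ethernet adapter reported by WMI.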
+
+import logging
+import os
+
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.osutils.factory import *
+from cloudbaseinit.plugins.base import *
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class SetUserSSHPublicKeysPlugin(BasePlugin):
+    def execute(self, service):
+        meta_data = service.get_meta_data('openstack')
+        if 'public_keys' not in meta_data:
+            return False
+
+        username = CONF.username
+
+        osutils = OSUtilsFactory().get_os_utils()
+        user_home = osutils.get_user_home(username)
+
+        if not user_home:
+            raise Exception("User profile not found!")
+
+        LOG.debug("User home: %s" % user_home)
+
+        user_ssh_dir = os.path.join(user_home, '.ssh')
+        if not os.path.exists(user_ssh_dir):
+            os.makedirs(user_ssh_dir)
+
+        authorized_keys_path = os.path.join(user_ssh_dir, "authorized_keys")
+        with open(authorized_keys_path, 'w') as f:
+            public_keys = meta_data['public_keys']
+            for k in public_keys:
+                # authorized_keys expects one key per line
+                f.write(public_keys[k] + '\n')
diff --git a/cloudbaseinit/plugins/windows/userdata.py b/cloudbaseinit/plugins/windows/userdata.py
new file mode 100644
index 00000000..8786cfee
--- /dev/null
+++ b/cloudbaseinit/plugins/windows/userdata.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
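For reference, the 'public_keys' metadata consumed by the plugin above is a mapping of key names to OpenSSH public key material. A self-contained sketch of the same write loop, pointed at a temporary directory instead of the real user home; the key below is truncated and hypothetical:

import os
import tempfile

# Hypothetical metadata shape: key name -> OpenSSH public key material.
public_keys = {
    'default': 'ssh-rsa AAAAB3NzaC1yc2EA... user@host',
}

# Same write loop as SetUserSSHPublicKeysPlugin, using a temp dir so
# the sketch runs anywhere.
user_home = tempfile.mkdtemp()
user_ssh_dir = os.path.join(user_home, '.ssh')
if not os.path.exists(user_ssh_dir):
    os.makedirs(user_ssh_dir)

with open(os.path.join(user_ssh_dir, 'authorized_keys'), 'w') as f:
    for k in public_keys:
        f.write(public_keys[k] + '\n')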
+
+import logging
+import os
+import re
+import tempfile
+import uuid
+
+from cloudbaseinit.osutils.factory import *
+from cloudbaseinit.plugins.base import *
+
+LOG = logging.getLogger(__name__)
+
+
+class UserDataPlugin(BasePlugin):
+    def execute(self, service):
+        user_data = service.get_user_data('openstack')
+        if not user_data:
+            return False
+
+        LOG.debug('User data content:\n%s' % user_data)
+
+        osutils = OSUtilsFactory().get_os_utils()
+
+        target_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
+        if re.search(r'^rem cmd\s', user_data, re.I):
+            target_path += '.cmd'
+            args = [target_path]
+            shell = True
+        elif re.search(r'^#!', user_data, re.I):
+            target_path += '.sh'
+            args = ['bash.exe', target_path]
+            shell = False
+        elif re.search(r'^#ps1\s', user_data, re.I):
+            target_path += '.ps1'
+            args = ['powershell.exe', target_path]
+            osutils.check_powershell_exec_policy()
+            shell = False
+        else:
+            # Unsupported
+            LOG.warning('Unsupported user_data format')
+            return False
+
+        try:
+            with open(target_path, 'wb') as f:
+                f.write(user_data)
+            (out, err, ret_val) = osutils.execute_process(args, shell)
+
+            LOG.info('User_data script ended with return code: %d' % ret_val)
+            LOG.debug('User_data stdout:\n%s' % out)
+            LOG.debug('User_data stderr:\n%s' % err)
+        except Exception, ex:
+            LOG.warning('An error occurred during user_data execution: '
+                        '\'%s\'' % ex)
+        finally:
+            if os.path.exists(target_path):
+                os.remove(target_path)
+
+        return False
diff --git a/cloudbaseinit/shell.py b/cloudbaseinit/shell.py
new file mode 100644
index 00000000..f77c340d
--- /dev/null
+++ b/cloudbaseinit/shell.py
@@ -0,0 +1,34 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from cloudbaseinit.openstack.common import cfg
+from cloudbaseinit.openstack.common import log as logging
+from cloudbaseinit.init import *
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def main():
+    CONF(sys.argv[1:])
+    logging.setup('cloudbaseinit')
+
+    init = InitManager()
+    init.configure_host()
diff --git a/cloudbaseinit/utils.py b/cloudbaseinit/utils.py
new file mode 100644
index 00000000..2e252672
--- /dev/null
+++ b/cloudbaseinit/utils.py
@@ -0,0 +1,27 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
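The first line of the user_data payload selects the interpreter, as implemented in UserDataPlugin above. A small sketch of the same detection order, with hypothetical payloads:

import re

# First-line markers recognized by UserDataPlugin (payloads hypothetical).
cmd_script = "rem cmd\n@echo off\necho hello > C:\\hello.txt\n"
bash_script = "#!/bin/bash\necho hello\n"
ps_script = "#ps1\nWrite-Host 'hello'\n"

def detect(user_data):
    # Mirrors the plugin's checks, in the same order.
    if re.search(r'^rem cmd\s', user_data, re.I):
        return 'cmd'
    elif re.search(r'^#!', user_data, re.I):
        return 'bash'
    elif re.search(r'^#ps1\s', user_data, re.I):
        return 'powershell'
    return None

assert detect(cmd_script) == 'cmd'
assert detect(bash_script) == 'bash'
assert detect(ps_script) == 'powershell'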
+ +import logging + +LOG = logging.getLogger(__name__) + + +class Utils(object): + def load_class(self, class_path): + LOG.debug('Loading class \'%s\'' % class_path) + parts = class_path.rsplit('.', 1) + module = __import__(parts[0], fromlist=parts[1]) + return getattr(module, parts[1]) diff --git a/openstack-common.conf b/openstack-common.conf new file mode 100644 index 00000000..eaebe6c6 --- /dev/null +++ b/openstack-common.conf @@ -0,0 +1,7 @@ +[DEFAULT] + +# The list of modules to copy from openstack-common +modules=cfg,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils + +# The base module to hold the copy of openstack.common +base=cloudbaseinit diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..d221e92f --- /dev/null +++ b/setup.py @@ -0,0 +1,48 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cloudbase Solutions Srl +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import setuptools + +from nova.openstack.common import setup as common_setup +from nova import version + +requires = common_setup.parse_requirements() +dependency_links = common_setup.parse_dependency_links() + +setuptools.setup(name='cloudbase-init', + version='0.9.0', + description='Portable cloud initialization service', + author='Cloudbase Solutions Srl', + author_email='apilotti@cloudbasesolutions.com', + url='http://www.cloudbase.it/', + classifiers=[ + 'Environment :: OpenStack', + 'Intended Audience :: Information Technology', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + ], + cmdclass=common_setup.get_cmdclass(), + packages=setuptools.find_packages(exclude=['bin']), + install_requires=requires, + dependency_links=dependency_links, + include_package_data=True, + setup_requires=['setuptools_git>=0.4'], + entry_points={'console_scripts': ['cloudbase-init = cloudbaseinit.shell:main']}, + py_modules=[]) diff --git a/tools/pip-requires b/tools/pip-requires new file mode 100644 index 00000000..2dbb9cd9 --- /dev/null +++ b/tools/pip-requires @@ -0,0 +1 @@ +wmi
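Utils.load_class is the dynamic-loading primitive that every factory in this tree builds on. A minimal usage sketch; instantiating WindowsUtils assumes a Windows guest with the wmi package (listed in tools/pip-requires) installed:

from cloudbaseinit.utils import Utils

# Resolve a class from its dotted path, exactly as OSUtilsFactory,
# PluginFactory and MetadataServiceFactory do with their config strings.
cls = Utils().load_class('cloudbaseinit.osutils.windows.WindowsUtils')
osutils = cls()
print osutils.__class__.__name__  # -> WindowsUtils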
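Since setup.py registers a cloudbase-init console script, the installed service can be driven from a shell with a configuration file holding the options registered by the plugins above. A hypothetical minimal example (the adapter name and values are placeholders, not defaults):

# cloudbase-init.conf
[DEFAULT]
username=Admin
groups=Administrators,Users
inject_user_password=true
network_adapter=Local Area Connection

Invocation, using the standard config-file option of the bundled openstack.common cfg module:

cloudbase-init --config-file cloudbase-init.conf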