Fix notification/sync daemon; get principals from keystone roles.

Change-Id: I240c402af07bcb83e99343eb207ec906ce0d4caa
Signed-off-by: Pino de Candia <giuseppe.decandia@gmail.com>
This commit is contained in:
Pino de Candia 2018-01-27 23:58:28 -06:00 committed by Pino de Candia
parent d34125d4f7
commit 7679f42150
27 changed files with 822 additions and 652 deletions

74
alembic.ini Normal file
View File

@ -0,0 +1,74 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = driver://user:pass@localhost/dbname
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

1
alembic/README Normal file
View File

@ -0,0 +1 @@
Generic single-database configuration.

70
alembic/env.py Normal file
View File

@ -0,0 +1,70 @@
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# Alembic migration environment: invoked by the `alembic` CLI, never
# imported directly by application code.
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# NOTE: left as None, so `alembic revision --autogenerate` cannot diff
# against the models; migrations must be written by hand until this is
# pointed at the project's MetaData.
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Emit migration SQL without connecting to a database.

    Configures the Alembic context with only the database URL (no
    Engine, and therefore no DBAPI, is needed). Calls to
    context.execute() write the generated SQL statements to the script
    output rather than executing them.
    """
    db_url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=db_url,
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations against a live database.

    Builds an Engine from the ini file's [alembic] section (keys
    prefixed with ``sqlalchemy.``) and binds a real connection to the
    migration context before running.
    """
    ini_section = config.get_section(config.config_ini_section)
    engine = engine_from_config(
        ini_section,
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    with engine.connect() as conn:
        context.configure(connection=conn, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
# The alembic CLI decides the mode (e.g. `alembic upgrade --sql` for
# offline); dispatch to the matching runner at import time.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

24
alembic/script.py.mako Normal file
View File

@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@ -91,9 +91,6 @@ function configure_tatu {
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
setup_colorized_logging_tatu $TATU_CONF DEFAULT "tenant" "user"
fi
# Backend Plugin Configuration
configure_tatu_backend
}
function configure_tatudashboard {
@ -137,27 +134,14 @@ function init_tatu {
# install_tatu - Collect source and prepare
function install_tatu {
if is_ubuntu; then
install_package libcap2-bin
elif is_fedora; then
# bind-utils package provides `dig`
install_package libcap bind-utils
fi
git_clone $TATU_REPO $TATU_DIR $TATU_BRANCH
setup_develop $TATU_DIR
install_tatu_backend
}
# install_tatuclient - Collect source and prepare
function install_tatuclient {
if use_library_from_git "python-tatuclient"; then
git_clone_by_name "python-tatuclient"
setup_dev_lib "python-tatuclient"
else
pip_install_gr "python-tatuclient"
fi
}
# install_tatudashboard - Collect source and prepare

View File

@ -35,14 +35,15 @@ TATU_ROOTWRAP_CONF=$TATU_CONF_DIR/rootwrap.conf
TATU_APIPASTE_CONF=$TATU_CONF_DIR/api-paste.ini
# Default repositories
TATU_REPO=${TATU_REPO:-${GIT_BASE}/pinodeca/tatu.git}
TATU_GIT_BASE=https://github.com
TATU_REPO=${TATU_REPO:-${TATU_GIT_BASE}/pinodeca/tatu.git}
TATU_BRANCH=${TATU_BRANCH:-master}
GITREPO["tatu-dashboard"]=${TATUDASHBOARD_REPO:-${GIT_BASE}/pinodeca/tatu-dashboard.git}
GITREPO["tatu-dashboard"]=${TATUDASHBOARD_REPO:-${TATU_GIT_BASE}/pinodeca/tatu-dashboard.git}
GITBRANCH["tatu-dashboard"]=${TATUDASHBOARD_BRANCH:-master}
GITDIR["tatu-dashboard"]=$DEST/tatu-dashboard
GITREPO["python-tatuclient"]=${TATUCLIENT_REPO:-${GIT_BASE}/pinodeca/python-tatuclient.git}
GITREPO["python-tatuclient"]=${TATUCLIENT_REPO:-${TATU_GIT_BASE}/pinodeca/python-tatuclient.git}
GITBRANCH["python-tatuclient"]=${TATUCLIENT_BRANCH:-master}
GITDIR["python-tatuclient"]=$DEST/python-tatuclient

View File

@ -6,6 +6,7 @@ SERVICE_PASSWORD=pinot
SERVICE_TOKEN=pinot
ADMIN_PASSWORD=pinot
enable_plugin tatu https://github.com/pinodeca/tatu
enable_plugin designate https://git.openstack.org/openstack/designate
enable_plugin barbican https://git.openstack.org/openstack/barbican
enable_plugin dragonflow https://github.com/pinodeca/dragonflow tatu

View File

@ -5,11 +5,11 @@ port = 18322
[composite:main]
use = egg:Paste#urlmap
/ = myapp
/noauth = myapp
/ = auth_app
/noauth = noauth_app
[pipeline:auth]
pipeline = authtoken myapp
pipeline = authtoken main
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
@ -21,9 +21,11 @@ admin_token = gAAAAABaPEXR3jF2TqsraXh7qkpKOiPcVbnmHdMEsrSYRKUnfQvpCAR9Sq02vDZNcQ
#admin_password = pinot
#admin_tenant_name = service
[app:myapp]
#use = call:tatu.api.app:main
paste.app_factory = tatu.api.app:main
[app:auth_app]
paste.app_factory = tatu.api.app:auth_factory
[app:noauth_app]
paste.app_factory = tatu.api.app:noauth_factory
###
# logging configuration

View File

@ -1 +0,0 @@
{"cloud-init": "#cloud-config\nmounts:\n - [ /dev/disk/by-label/config-2, /mnt/config ]\npackages:\n - python\n - python-requests\nwrite_files:\n - path: /root/setup-ssh.py\n permissions: '0700'\n owner: root:root\n content: |\n print 'Importing packages'\n import json\n import requests\n import os\n import subprocess\n import uuid\n def getVendordataFromConfigDrive():\n path = '/mnt/config/openstack/latest/vendor_data2.json'\n with open(path, 'r') as f:\n json_string = f.read()\n return json.loads(json_string)\n def getInstanceAndProjectIdFromConfigDrive():\n path = '/mnt/config/openstack/latest/meta_data.json'\n with open(path, 'r') as f:\n json_string = f.read()\n metadata = json.loads(json_string)\n assert 'uuid' in metadata\n assert 'project_id' in metadata\n return str(uuid.UUID(metadata['uuid'], version=4)), str(uuid.UUID(metadata['project_id'], version=4))\n print 'Getting vendordata from ConfigDrive'\n vendordata = getVendordataFromConfigDrive()\n print 'Getting instance and project IDs'\n instance_id, project_id = getInstanceAndProjectIdFromConfigDrive()\n assert 'tatu' in vendordata\n tatu = vendordata['tatu']\n assert 'token' in tatu\n assert 'auth_pub_key_user' in tatu\n assert 'principals' in tatu\n principals = tatu['principals'].split(',')\n with open('/etc/ssh/ssh_host_rsa_key.pub', 'r') as f:\n host_key_pub = f.read()\n server = 'http://172.24.4.1:18322'\n hostcert_request = {\n 'token_id': tatu['token'],\n 'host_id': instance_id,\n 'pub_key': host_key_pub\n }\n print 'Request the host certificate.'\n response = requests.post(\n # Hard-coded SSHaaS API address will only work for devstack and requires\n # routing and SNAT or DNAT.\n # This eventually needs to be either:\n # 1) 169.254.169.254 if there's a SSHaaS-proxy; OR\n # 2) the real address of the API, possibly supplied in the vendordata and\n # still requiring routing and SNAT or DNAT.\n server + '/noauth/hostcerts',\n data=json.dumps(hostcert_request)\n )\n print 'Got the host certificate: 
{}'.format(response.content)\n assert response.status_code == 201\n assert 'location' in response.headers\n location = response.headers['location']\n # No need to GET the host cert - it's returned in the POST\n #response = requests.get(server + location)\n hostcert = json.loads(response.content)\n assert 'host_id' in hostcert\n assert hostcert['host_id'] == instance_id\n assert 'fingerprint' in hostcert\n assert 'auth_id' in hostcert\n auth_id = str(uuid.UUID(hostcert['auth_id'], version=4))\n assert auth_id == project_id\n assert 'cert' in hostcert\n print 'Begin writing files.'\n # Write the host's certificate\n with open('/etc/ssh/ssh_host_rsa_key-cert.pub', 'w') as f:\n f.write(hostcert['cert'])\n # Write the authorized principals file\n os.mkdir('/etc/ssh/auth_principals')\n with open('/etc/ssh/auth_principals/ubuntu', 'w') as f:\n for p in principals:\n f.write(p + os.linesep)\n # Write the User CA public key file\n with open('/etc/ssh/ca_user.pub', 'w') as f:\n f.write(tatu['auth_pub_key_user'])\n print 'All tasks completed.'\nruncmd:\n - python /root/setup-ssh.py > /var/log/setup-ssh.log 2>&1\n - sed -i -e '$aTrustedUserCAKeys /etc/ssh/ca_user.pub' /etc/ssh/sshd_config\n - sed -i -e '$aAuthorizedPrincipalsFile /etc/ssh/auth_principals/%u' /etc/ssh/sshd_config\n - sed -i -e '$aHostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub' /etc/ssh/sshd_config\n - systemctl restart ssh\n"}

View File

@ -3,6 +3,7 @@
[tatu]
use_barbican_key_manager = True
#use_pat_bastions = True
ssh_port = 1222
num_total_pats = 1
num_pat_bastions_per_server = 1
#pat_dns_zone_name = tatuPAT.com.

View File

@ -1,116 +1,118 @@
#cloud-config
mounts:
- [ /dev/disk/by-label/config-2, /mnt/config ]
packages:
- python
- python-requests
write_files:
- path: /root/setup-ssh.py
- path: /root/tatu-setup-ssh.sh
permissions: '0700'
owner: root:root
content: |
#!/usr/bin/env python
print 'Importing packages'
import json
import requests
import os
import uuid
def getVendordataFromConfigDrive():
path = '/mnt/config/openstack/latest/vendor_data2.json'
with open(path, 'r') as f:
json_string = f.read()
return json.loads(json_string)
def getInstanceAndProjectIdFromConfigDrive():
path = '/mnt/config/openstack/latest/meta_data.json'
with open(path, 'r') as f:
json_string = f.read()
metadata = json.loads(json_string)
assert 'uuid' in metadata
assert 'project_id' in metadata
return str(uuid.UUID(metadata['uuid'], version=4)), str(uuid.UUID(metadata['project_id'], version=4))
print 'Getting vendordata from ConfigDrive'
vendordata = getVendordataFromConfigDrive()
print 'Getting instance and project IDs'
instance_id, project_id = getInstanceAndProjectIdFromConfigDrive()
assert 'tatu' in vendordata
tatu = vendordata['tatu']
assert 'token' in tatu
assert 'auth_pub_key_user' in tatu
assert 'principals' in tatu
principals = tatu['principals'].split(',')
with open('/etc/ssh/ssh_host_rsa_key.pub', 'r') as f:
host_key_pub = f.read()
server = 'http://172.24.4.1:18322'
hostcert_request = {
'token_id': tatu['token'],
'host_id': instance_id,
'pub_key': host_key_pub
}
print 'Request the host certificate.'
response = requests.post(
# Hard-coded SSHaaS API address will only work for devstack and requires
# routing and SNAT or DNAT.
# This eventually needs to be either:
# 1) 169.254.169.254 if there's a SSHaaS-proxy; OR
# 2) the real address of the API, possibly supplied in the vendordata and
# still requiring routing and SNAT or DNAT.
server + '/noauth/hostcerts',
data=json.dumps(hostcert_request)
)
print 'Got the host certificate: {}'.format(response.content)
assert response.status_code == 201
assert 'location' in response.headers
location = response.headers['location']
# No need to GET the host cert - it's returned in the POST
#response = requests.get(server + location)
hostcert = json.loads(response.content)
assert 'host_id' in hostcert
assert hostcert['host_id'] == instance_id
assert 'fingerprint' in hostcert
assert 'auth_id' in hostcert
auth_id = str(uuid.UUID(hostcert['auth_id'], version=4))
assert auth_id == project_id
assert 'cert' in hostcert
print 'Begin writing files.'
# Write the host's certificate
with open('/etc/ssh/ssh_host_rsa_key-cert.pub', 'w') as f:
f.write(hostcert['cert'])
# Write the authorized principals file
os.mkdir('/etc/ssh/auth_principals')
with open('/etc/ssh/auth_principals/root', 'w') as f:
for p in principals:
f.write(p + os.linesep)
# Write the User CA public key file
with open('/etc/ssh/ca_user.pub', 'w') as f:
f.write(tatu['auth_pub_key_user'])
print 'All tasks completed.'
- path: /root/manage-revoked_keys.py
#!/bin/bash
# Name: tatu-setup-ssh.sh
#
# Purpose: Fetch a SSH host cert from Tatu and configure SSH to use certs.
metadata=$(cat /mnt/config/openstack/latest/meta_data.json)
auth_id=$(echo $metadata | grep -Po 'project_id": "\K[^"]*')
if [ -z $auth_id ]; then
echo Failed to extract the project ID from metadata
exit 1
fi
echo auth_id=$auth_id
host_id=$(echo $metadata | grep -Po 'uuid": "\K[^"]*')
echo host_id=$host_id
vendordata=$(cat /mnt/config/openstack/latest/vendor_data2.json)
token=$(echo $vendordata | grep -Po '"token": "\K[^"]*')
if [ -z $token ]; then
echo Failed to extract the Tatu token ID from vendordata
exit 1
fi
echo token=$token
ca_user=$(echo $vendordata | grep -Po '"auth_pub_key_user": "\K[^"]*')
echo ca_user=$ca_user
echo $ca_user > /etc/ssh/ca_user.pub
#root_principals=$(echo $vendordata | grep -Po '"root_principals": "\K[^"]*')
#echo root_principals=$root_principals
users=$(echo $vendordata | grep -Po '"users": "\K[^"]*')
echo users=$users
sudoers=$(echo $vendordata | grep -Po '"sudoers": "\K[^"]*')
echo sudoers=$sudoers
ssh_port=$(echo $vendordata | grep -Po '"ssh_port": \K[^,]*')
echo ssh_port=$ssh_port
host_pub_key=$(cat /etc/ssh/ssh_host_rsa_key.pub)
echo host public key is $host_pub_key
data=$(echo {\"token_id\": \"$token\", \"host_id\": \"$host_id\", \"pub_key\": \"$host_pub_key\"})
echo $data > /tmp/tatu_cert_request.json
url=http://169.254.169.254/noauth/hostcerts
echo url=$url
echo Posting Host Certificate request to Tatu API
response=$(curl -s -w "%{http_code}" -d "@/tmp/tatu_cert_request.json" -X POST $url)
code=${response##*\}}
if [ "$code" != "200" ]; then
echo Curl to Tatu API failed with code $code
exit 1
fi
echo Tatu response is $response
cert=$(echo $response | grep -Po 'cert": "\K[^"]*')
cert=${cert%%\\n} # TODO: fix the trailing \n on the server side.
echo $cert > /etc/ssh/ssh_host_rsa_key-cert.pub
mkdir -p /etc/ssh/auth_principals
#root_principals_file=/etc/ssh/auth_principals/root
#> $root_principals_file
#for i in ${root_principals//,/ }
#do
# echo $i >> $root_principals_file
#done
for i in ${users//,/ }; do
id -u $i > /dev/null 2>&1
if [ $? == 1 ]; then
#adduser --disabled-password --gecos '' $i
adduser $i
fi
done
for i in ${sudoers//,/ }; do
if [ $(getent group sudo) ]; then
usermod -aG sudo $i
fi
if [ $(getent group wheel) ]; then
usermod -aG wheel $i
fi
done
sed -i -e '$aTrustedUserCAKeys /etc/ssh/ca_user.pub' /etc/ssh/sshd_config
# man sshd_config, under AuthorizedPrincipalsFile: The default is none, i.e. not to use a principals file
# in this case, the username of the user must appear in a certificate's principals list for it to be accepted.
#sed -i -e '$aAuthorizedPrincipalsFile /etc/ssh/auth_principals/%u' /etc/ssh/sshd_config
sed -i -e '$aHostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub' /etc/ssh/sshd_config
sed -i -e '/^PasswordAuthentication /d' /etc/ssh/sshd_config
sed -i -e '$aPasswordAuthentication no' /etc/ssh/sshd_config
> /etc/ssh/revoked-keys
sed -i -e '$aRevokedKeys /etc/ssh/revoked-keys' /etc/ssh/sshd_config
sed -i -e '$aPort '"$ssh_port" /etc/ssh/sshd_config
sed -i -e '$aPort 22' /etc/ssh/sshd_config
setenforce permissive
systemctl restart sshd
- path: /root/tatu-manage-revoked-keys.sh
permissions: '0700'
owner: root:root
content: |
#!/bin/bash
# Name: tatu-manage-revoked-keys.sh
#
# Purpose: Fetch the revoked keys data from Tatu and write it to /etc/ssh
# !/usr/bin/env python
import base64
import json
import requests
import uuid
path = '/mnt/config/openstack/latest/meta_data.json'
with open(path, 'r') as f:
json_string = f.read()
metadata = json.loads(json_string)
auth_id = str(uuid.UUID(metadata['project_id'], version=4))
server = 'http://172.24.4.1:18322'
response = requests.get(server + '/noauth/revokeduserkeys/' + auth_id)
assert response.status_code == 200
body = json.loads(response.content)
assert 'revoked_keys_data' in body
with open('/etc/ssh/revoked-keys', 'w') as f:
f.write(base64.b64decode(body['revoked_keys_data']))
metadata=$(cat /mnt/config/openstack/latest/meta_data.json)
auth_id=$(echo $metadata | grep -Po 'project_id": "\K[^"]*')
echo auth_id=$auth_id
url=http://169.254.169.254/noauth/revokeduserkeys/$auth_id
echo url=$url
response=$(curl -s -w "%{http_code}" $url)
code=${response##*\}}
if [ "$code" != "200" ]; then
echo Curl to Tatu API failed with code $code
exit 1
fi
echo Tatu response is $response
b64revoked=$(echo $response | grep -Po 'revoked_keys_data": "\K[^"]*')
echo $b64revoked | base64 -d > /etc/ssh/revoked-keys
runcmd:
- dnf install -y python python-requests
- python /root/setup-ssh.py > /var/log/setup-ssh.log 2>&1
- sed -i -e '$aTrustedUserCAKeys /etc/ssh/ca_user.pub' /etc/ssh/sshd_config
- sed -i -e '$aAuthorizedPrincipalsFile /etc/ssh/auth_principals/%u' /etc/ssh/sshd_config
- sed -i -e '$aHostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub' /etc/ssh/sshd_config
- python /root/manage-revoked-keys.py >> /var/log/setup-ssh.log 2>&1
- sed -i -e '$aRevokedKeys /etc/ssh/revoked-keys' /etc/ssh/sshd_config
- systemctl restart sshd
- /root/tatu-setup-ssh.sh > /var/log/tatu-setup-ssh.log 2>&1
- /root/tatu-manage-revoked-keys.sh > /var/log/tatu-revoked-keys.log
- crontab -l | { cat; echo "* * * * * /root/tatu-manage-revoked-keys.sh >> /var/log/tatu-revoked-keys.log"; } | crontab -

View File

@ -1,90 +0,0 @@
#cloud-config
mounts:
- [ /dev/disk/by-label/config-2, /mnt/config ]
write_files:
- path: /root/tatu-setup-ssh.sh
permissions: '0700'
owner: root:root
content: |
#!/bin/bash
# Name: tatu-setup-ssh.sh
#
# Purpose: Fetch a SSH host cert from Tatu and configure SSH to use certs.
metadata=$(cat /mnt/config/openstack/latest/meta_data.json)
auth_id=$(echo $metadata | grep -Po 'project_id": "\K[^"]*')
if [ -z $auth_id ]; then
echo Failed to extract the project ID from metadata
exit 1
fi
echo auth_id=$auth_id
host_id=$(echo $metadata | grep -Po 'uuid": "\K[^"]*')
echo host_id=$host_id
vendordata=$(cat /mnt/config/openstack/latest/vendor_data2.json)
token=$(echo $vendordata | grep -Po 'token": "\K[^"]*')
if [ -z $token ]; then
echo Failed to extract the Tatu token ID from vendordata
exit 1
fi
echo token=$token
ca_user=$(echo $vendordata | grep -Po 'auth_pub_key_user": "\K[^"]*')
echo ca_user=$ca_user
echo $ca_user > /etc/ssh/ca_user.pub
principals=$(echo $vendordata | grep -Po 'principals": "\K[^"]*')
echo principals=$principals
host_pub_key=$(cat /etc/ssh/ssh_host_rsa_key.pub)
echo host public key is $host_pub_key
data=$(echo {\"token_id\": \"$token\", \"host_id\": \"$host_id\", \"pub_key\": \"$host_pub_key\"})
echo $data > /tmp/tatu_cert_request.json
url=http://169.254.169.254/noauth/hostcerts
echo url=$url
echo Posting Host Certificate request to Tatu API
response=$(curl -s -w "%{http_code}" -d "@/tmp/tatu_cert_request.json" -X POST $url)
code=${response##*\}}
if [ "$code" != "200" ]; then
echo Curl to Tatu API failed with code $code
exit 1
fi
echo Tatu response is $response
cert=$(echo $response | grep -Po 'cert": "\K[^"]*')
cert=${cert%%\\n} # TODO: fix the trailing \n on the server side.
echo $cert > /etc/ssh/ssh_host_rsa_key-cert.pub
mkdir -p /etc/ssh/auth_principals
principals_file=/etc/ssh/auth_principals/root
> $principals_file
for i in ${principals//,/ }
do
echo $i >> $principals_file
done
sed -i -e '$aTrustedUserCAKeys /etc/ssh/ca_user.pub' /etc/ssh/sshd_config
sed -i -e '$aAuthorizedPrincipalsFile /etc/ssh/auth_principals/%u' /etc/ssh/sshd_config
sed -i -e '$aHostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub' /etc/ssh/sshd_config
> /etc/ssh/revoked-keys
sed -i -e '$aRevokedKeys /etc/ssh/revoked-keys' /etc/ssh/sshd_config
systemctl restart sshd
- path: /root/tatu-manage-revoked-keys.sh
permissions: '0700'
owner: root:root
content: |
#!/bin/bash
# Name: tatu-manage-revoked-keys.sh
#
# Purpose: Fetch the revoked keys data from Tatu and write it to /etc/ssh
# !/usr/bin/env python
metadata=$(cat /mnt/config/openstack/latest/meta_data.json)
auth_id=$(echo $metadata | grep -Po 'project_id": "\K[^"]*')
echo auth_id=$auth_id
url=http://169.254.169.254/noauth/revokeduserkeys/$auth_id
echo url=$url
response=$(curl -s -w "%{http_code}" $url)
code=${response##*\}}
if [ "$code" != "200" ]; then
echo Curl to Tatu API failed with code $code
exit 1
fi
echo Tatu response is $response
b64revoked=$(echo $response | grep -Po 'revoked_keys_data": "\K[^"]*')
echo $b64revoked | base64 -d > /etc/ssh/revoked-keys
runcmd:
- /root/tatu-setup-ssh.sh > /var/log/tatu-setup-ssh.log 2>&1
- /root/tatu-manage-revoked-keys.sh > /var/log/tatu-revoked-keys.log
- crontab -l | { cat; echo "* * * * * /root/tatu-manage-revoked-keys.sh >> /var/log/tatu-revoked-keys.log"; } | crontab -

View File

@ -13,7 +13,7 @@ python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0
keystoneauth1>=2.7.0 # Apache-2.0
keystonemiddleware>=4.12.0 # Apache-2.0
oslo.messaging>=5.34.0 # Apache-2.0
oslo.log>=3.34.0
oslo.log>=3.36.0 # Apache-2.0
pyramid>=1.9.1 # BSD-derived (http://www.repoze.org/LICENSE.txt)
Paste # MIT
dogpile.cache

View File

@ -1,40 +0,0 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import requests
import sys
import uuid
parser = argparse.ArgumentParser(description="Get the CA's public keys from Tatu API.")
parser.add_argument('--projid', '-P', required=True)
parser.add_argument('--tatu-url', default= 'http://127.0.0.1:18322',
help='URL of the Tatu API')
args = parser.parse_args()
try:
auth_id = str(uuid.UUID(args.projid, version=4))
except:
print '--projid should be the UUID of a Tatu CA (usually a cloud tenant/project).'
exit()
server = args.tatu_url
response = requests.post(
server + '/authorities',
data=json.dumps({'auth_id': auth_id})
)
assert response.status_code == 201
assert 'location' in response.headers
print response.headers['location']
assert response.headers['location'] == '/authorities/' + auth_id

View File

@ -1,36 +0,0 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import requests
import sys
import uuid
parser = argparse.ArgumentParser(description="Get the CA's public keys from Tatu API.")
parser.add_argument('--projid', '-P', required=True)
parser.add_argument('--tatu-url', default= 'http://127.0.0.1:18322',
help='URL of the Tatu API')
args = parser.parse_args()
try:
auth_id = str(uuid.UUID(args.projid, version=4))
except:
print '--projid should be the UUID of a Tatu CA (usually a cloud tenant/project).'
exit()
server = args.tatu_url
response = requests.get(server + '/authorities/' + auth_id)
if response.status_code != 200:
print 'Failed to retrieve the CA keys.'
print response.content

View File

@ -1,68 +0,0 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import os
import requests
import subprocess
import uuid
from Crypto.PublicKey import RSA
parser = argparse.ArgumentParser(description='Get a user certificate from Tatu API.')
parser.add_argument('--projid', '-P', required=True)
parser.add_argument('--pubkeyfile', '-K', required=True)
parser.add_argument('--userid', '-U', required=True)
parser.add_argument('--tatu-url', default= 'http://127.0.0.1:18322',
help='URL of the Tatu API')
args = parser.parse_args()
if not os.path.isfile(args.pubkeyfile):
print '--pubkeyfile must point to a valid public key.'
exit()
try:
auth_id = str(uuid.UUID(args.projid, version=4))
except:
print '--projid should be the UUID of a Tatu CA (usually a cloud tenant/project).'
exit()
try:
user_id = str(uuid.UUID(args.userid, version=4))
except:
print '--userid should be the UUID of a user with permissions in the cloud project.'
exit()
with open(args.pubkeyfile, 'r') as f:
pubkeytext = f.read()
server = args.tatu_url
user = {
'user_id': user_id,
'auth_id': auth_id,
'pub_key': pubkeytext
}
response = requests.post(
server + '/usercerts',
data=json.dumps(user)
)
if response.status_code != 201:
print 'Failed: ' + str(response)
exit()
assert 'location' in response.headers
location = response.headers['location']
response = requests.get(server + location)
usercert = json.loads(response.content)
print usercert['cert']

View File

@ -1,47 +0,0 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import os
import requests
import subprocess
import uuid
from Crypto.PublicKey import RSA
parser = argparse.ArgumentParser(description='Revoke a Tatu-generated user SSH certificate.')
parser.add_argument('--projid', '-P', required=True)
parser.add_argument('--serial', '-S', required=True)
parser.add_argument('--tatu-url', default= 'http://127.0.0.1:18322',
help='URL of the Tatu API')
args = parser.parse_args()
try:
auth_id = str(uuid.UUID(args.projid, version=4))
except:
print '--projid should be the UUID of a Tatu CA (usually a cloud tenant/project).'
exit()
if not args.serial.isdigit():
print '--serial should be a number'
exit()
server = args.tatu_url
response = requests.post(
server + '/revokeduserkeys/' + auth_id,
data=json.dumps({'serial': args.serial})
)
if response.status_code != 200:
print 'Failed: ' + str(response)
exit()

View File

@ -18,23 +18,31 @@ from tatu.db.persistence import SQLAlchemySessionManager
LOG = logging.getLogger(__name__)
def create_app(sa):
LOG.info("Creating falcon API instance.")
LOG.info("Creating falcon API instance for authenticated API calls.")
api = falcon.API(middleware=[models.Logger(), sa])
api.add_route('/authorities', models.Authorities())
api.add_route('/authorities/{auth_id}', models.Authority())
api.add_route('/usercerts', models.UserCerts())
api.add_route('/usercerts/{serial}', models.UserCert())
api.add_route('/hosts', models.Hosts())
api.add_route('/hosts/{host_id}', models.Host())
api.add_route('/hostcerts', models.HostCerts())
api.add_route('/hostcerts/{host_id}/{fingerprint}', models.HostCert())
api.add_route('/hosttokens', models.Tokens())
api.add_route('/novavendordata', models.NovaVendorData())
api.add_route('/revokeduserkeys/{auth_id}', models.RevokedUserKeys())
api.add_route('/pats', models.PATs())
return api
def create_noauth_app(sa):
LOG.info("Creating falcon API instance for unauthenticated API calls.")
api = falcon.API(middleware=[models.Logger(), sa])
api.add_route('/hostcerts', models.HostCerts())
api.add_route('/revokeduserkeys/{auth_id}', models.RevokedUserKeys())
return api
def get_app():
def auth_factory(global_config, **settings):
return create_app(SQLAlchemySessionManager())
def main(global_config, **settings):
return create_app(SQLAlchemySessionManager())
def noauth_factory(global_config, **settings):
return create_noauth_app(SQLAlchemySessionManager())

View File

@ -19,8 +19,10 @@ from oslo_log import log as logging
from tatu.config import CONF
from tatu.db import models as db
from tatu.dns import add_srv_records, get_srv_url
from tatu.pat import create_pat_entries, get_port_ip_tuples
from tatu.dns import add_srv_records
from tatu.ks_utils import getProjectRoleNames, getProjectNameForID, getUserNameForID
from tatu.pat import create_pat_entries, getAllPats, ip_port_tuples_to_string
from tatu.utils import canonical_uuid_string, datetime_to_string
LOG = logging.getLogger(__name__)
@ -29,7 +31,7 @@ def validate_uuid(map, key):
try:
# Verify UUID is valid, then convert to canonical string representation
# to avoid DB errors.
map[key] = str(uuid.UUID(map[key], version=4))
map[key] = canonical_uuid_string(map[key])
except ValueError:
msg = '{} is not a valid UUID'.format(map[key])
raise falcon.HTTPBadRequest('Bad request', msg)
@ -73,34 +75,33 @@ class Logger(object):
resp.status, resp.location, resp.body))
def _authAsDict(auth):
return {
'auth_id': auth.auth_id,
'name': auth.name,
'user_pub_key': auth.user_pub_key,
'host_pub_key': auth.host_pub_key,
}
class Authorities(object):
@falcon.before(validate)
def on_post(self, req, resp):
id = req.body['auth_id']
try:
db.createAuthority(
self.session,
req.body['auth_id'],
id,
getProjectNameForID(id)
)
except KeyError as e:
raise falcon.HTTPBadRequest(str(e))
resp.status = falcon.HTTP_201
resp.location = '/authorities/' + req.body['auth_id']
resp.location = '/authorities/{}'.format(id)
@falcon.before(validate)
def on_get(self, req, resp):
auths = db.getAuthorities(self.session)
items = []
for auth in auths:
user_key = RSA.importKey(db.getAuthUserKey(auth))
user_pub_key = user_key.publickey().exportKey('OpenSSH')
host_key = RSA.importKey(db.getAuthHostKey(auth))
host_pub_key = host_key.publickey().exportKey('OpenSSH')
items.append({
'auth_id': auth.auth_id,
'user_pub_key': user_pub_key,
'host_pub_key': host_pub_key,
})
body = {'CAs': items}
body = {'CAs': [_authAsDict(auth)
for auth in db.getAuthorities(self.session)]}
resp.body = json.dumps(body)
resp.status = falcon.HTTP_OK
@ -112,52 +113,46 @@ class Authority(object):
if auth is None:
resp.status = falcon.HTTP_NOT_FOUND
return
user_key = RSA.importKey(db.getAuthUserKey(auth))
user_pub_key = user_key.publickey().exportKey('OpenSSH')
host_key = RSA.importKey(db.getAuthHostKey(auth))
host_pub_key = host_key.publickey().exportKey('OpenSSH')
body = {
'auth_id': auth_id,
'user_pub_key': user_pub_key,
'host_pub_key': host_pub_key
}
resp.body = json.dumps(body)
resp.body = json.dumps(_authAsDict(auth))
resp.status = falcon.HTTP_OK
def _userAsDict(user):
def _userCertAsDict(cert):
return {
'user_id': user.user_id,
'fingerprint': user.fingerprint,
'auth_id': user.auth_id,
'cert': user.cert,
'revoked': user.revoked,
'serial': user.serial,
'user_id': cert.user_id,
'user_name': cert.user_name,
'principals': cert.principals,
'fingerprint': cert.fingerprint,
'auth_id': cert.auth_id,
'cert': cert.cert.strip('\n'),
'revoked': cert.revoked,
'serial': cert.serial,
'created_at': datetime_to_string(cert.created_at),
'expires_at': datetime_to_string(cert.expires_at),
}
class UserCerts(object):
@falcon.before(validate)
def on_post(self, req, resp):
# TODO(pino): validation
id = req.body['user_id']
try:
user = db.createUserCert(
user_cert = db.createUserCert(
self.session,
req.body['user_id'],
id,
getUserNameForID(id),
req.body['auth_id'],
req.body['pub_key']
)
except KeyError as e:
raise falcon.HTTPBadRequest(str(e))
resp.status = falcon.HTTP_201
resp.location = '/usercerts/' + user.user_id + '/' + user.fingerprint
resp.body = json.dumps(_userAsDict(user))
resp.location = '/usercerts/{}/{}'.format(id, user_cert.fingerprint)
resp.body = json.dumps(_userCertAsDict(user_cert))
@falcon.before(validate)
def on_get(self, req, resp):
users = db.getUserCerts(self.session)
items = []
for user in users:
items.append(_userAsDict(user))
body = {'users': items}
body = {'certs': [_userCertAsDict(cert)
for cert in db.getUserCerts(self.session)]}
resp.body = json.dumps(body)
resp.status = falcon.HTTP_OK
@ -169,24 +164,49 @@ class UserCert(object):
if user is None:
resp.status = falcon.HTTP_NOT_FOUND
return
resp.body = json.dumps(_userAsDict(user))
resp.body = json.dumps(_userCertAsDict(user))
resp.status = falcon.HTTP_OK
def _hostAsDict(host):
item = {
'host_id': host.host_id,
'fingerprint': host.fingerprint,
'auth_id': host.auth_id,
'cert': host.cert,
'hostname': host.hostname,
return {
'id': host.id,
'name': host.name,
'pat_bastions': host.pat_bastions,
'srv_url': host.srv_url,
}
class Hosts(object):
    """Falcon resource listing every SSH-managed host."""

    @falcon.before(validate)
    def on_get(self, req, resp):
        """Return all Host records as {'hosts': [...]}."""
        hosts = db.getHosts(self.session)
        resp.body = json.dumps({'hosts': [_hostAsDict(h) for h in hosts]})
        resp.status = falcon.HTTP_OK
class Host(object):
    """Falcon resource exposing a single SSH-managed host by ID."""

    @falcon.before(validate)
    def on_get(self, req, resp, host_id):
        """Return one Host record, or 404 when the ID is unknown."""
        record = db.getHost(self.session, host_id)
        if record is None:
            resp.status = falcon.HTTP_NOT_FOUND
        else:
            resp.body = json.dumps(_hostAsDict(record))
            resp.status = falcon.HTTP_OK
def _hostCertAsDict(cert):
    """Serialize a HostCert row into a JSON-ready dict."""
    result = {}
    result['host_id'] = cert.host_id
    result['fingerprint'] = cert.fingerprint
    result['auth_id'] = cert.auth_id
    # Certificates are stored with a trailing newline; strip for transport.
    result['cert'] = cert.cert.strip('\n')
    result['hostname'] = cert.hostname
    result['created_at'] = datetime_to_string(cert.created_at)
    result['expires_at'] = datetime_to_string(cert.expires_at)
    return result
if CONF.tatu.use_pat_bastions:
item['pat_bastions'] = ','.join(
'{}:{}'.format(t[1], t[0]) for t in
get_port_ip_tuples(host.host_id, 22))
item['srv_url'] = get_srv_url(host.hostname, host.auth_id)
return item
class HostCerts(object):
@ -195,7 +215,7 @@ class HostCerts(object):
# Note that we could have found the host_id using the token_id.
# But requiring the host_id makes it a bit harder to steal the token.
try:
host = db.createHostCert(
cert = db.createHostCert(
self.session,
req.body['token_id'],
req.body['host_id'],
@ -203,17 +223,14 @@ class HostCerts(object):
)
except KeyError as e:
raise falcon.HTTPBadRequest(str(e))
resp.body = json.dumps(_hostAsDict(host))
resp.body = json.dumps(_hostCertAsDict(cert))
resp.status = falcon.HTTP_200
resp.location = '/hostcerts/' + host.host_id + '/' + host.fingerprint
resp.location = '/hostcerts/' + cert.host_id + '/' + cert.fingerprint
@falcon.before(validate)
def on_get(self, req, resp):
hosts = db.getHostCerts(self.session)
items = []
for host in hosts:
items.append(_hostAsDict(host))
body = {'hosts': items}
body = {'certs': [_hostCertAsDict(cert)
for cert in db.getHostCerts(self.session)]}
resp.body = json.dumps(body)
resp.status = falcon.HTTP_OK
@ -221,11 +238,11 @@ class HostCerts(object):
class HostCert(object):
@falcon.before(validate)
def on_get(self, req, resp, host_id, fingerprint):
host = db.getHostCert(self.session, host_id, fingerprint)
if host is None:
cert = db.getHostCert(self.session, host_id, fingerprint)
if cert is None:
resp.status = falcon.HTTP_NOT_FOUND
return
resp.body = json.dumps(_hostAsDict(host))
resp.body = json.dumps(_hostCertAsDict(cert))
resp.status = falcon.HTTP_OK
@ -257,37 +274,54 @@ class NovaVendorData(object):
# "project-id": "039d104b7a5c4631b4ba6524d0b9e981",
# "user-data": null
# }
instance_id = req.body['instance-id']
hostname = req.body['hostname']
project_id = req.body['project-id']
try:
token = db.createToken(
self.session,
req.body['instance-id'],
req.body['project-id'],
req.body['hostname']
instance_id,
project_id,
hostname,
)
except KeyError as e:
raise falcon.HTTPBadRequest(str(e))
auth = db.getAuthority(self.session, req.body['project-id'])
auth = db.getAuthority(self.session, project_id)
if auth is None:
resp.status = falcon.HTTP_NOT_FOUND
return
key = RSA.importKey(db.getAuthUserKey(auth))
pub_key = key.publickey().exportKey('OpenSSH')
roles = getProjectRoleNames(req.body['project-id'])
vendordata = {
'token': token.token_id,
'auth_pub_key_user': pub_key,
'principals': 'admin'
'auth_pub_key_user': auth.user_pub_key,
'root_principals': '', #keep in case we want to use it later
'users': ','.join(roles),
'sudoers': ','.join([r for r in roles if "admin" in r]),
'ssh_port': CONF.tatu.ssh_port,
}
resp.body = json.dumps(vendordata)
resp.location = '/hosttokens/' + token.token_id
resp.status = falcon.HTTP_201
host = db.getHost(self.session, instance_id)
if host is None:
# TODO(pino): make the whole workflow fault-tolerant
# TODO(pino): make this configurable per project or subnet
pat_bastions = ''
srv_url = ''
if CONF.tatu.use_pat_bastions:
port_ip_tuples = create_pat_entries(self.session,
req.body['instance-id'], 22)
add_srv_records(req.body['hostname'], req.body['project-id'],
port_ip_tuples)
ip_port_tuples = create_pat_entries(self.session, instance_id)
srv_url = add_srv_records(hostname, auth.name, ip_port_tuples)
pat_bastions = ip_port_tuples_to_string(ip_port_tuples)
# else, e.g. call LBaaS API
db.createHost(session=self.session,
id=instance_id,
name=hostname,
pat_bastions=pat_bastions,
srv_url=srv_url,
)
class RevokedUserKeys(object):
@falcon.before(validate)
@ -306,8 +340,21 @@ class RevokedUserKeys(object):
self.session,
auth_id,
serial=req.body.get('serial', None),
key_id=req.body.get('key_id', None),
cert=req.body.get('cert', None)
)
resp.status = falcon.HTTP_OK
resp.body = json.dumps({})
class PATs(object):
    """Falcon resource listing all Dragonflow PAT (bastion) addresses."""

    @falcon.before(validate)
    def on_get(self, req, resp):
        """Return every PAT as {'pats': [{ip, chassis, lport}, ...]}."""
        pats = [{
            'ip': str(p.ip_address),
            'chassis': p.chassis.id,
            'lport': p.lport.id,
        } for p in getAllPats()]
        resp.body = json.dumps({'pats': pats})
        resp.status = falcon.HTTP_OK

View File

@ -15,6 +15,7 @@ from dragonflow import conf as dragonflow_cfg
from dragonflow.db import api_nb
from keystoneauth1 import session as keystone_session
from keystoneauth1.identity import v3
from keystoneclient.v3 import client as keystone_client
from novaclient import client as nova_client
from neutronclient.v2_0 import client as neutron_client
from oslo_config import cfg
@ -30,6 +31,8 @@ opts = [
help='Use OpenStack Barbican to store sensitive data'),
cfg.BoolOpt('use_pat_bastions', default=True,
help='Use PAT as a "poor man\'s" approach to bastions'),
cfg.IntOpt('ssh_port', default=2222,
help='SSH server port number managed by Tatu (may be other than 22)'),
cfg.IntOpt('num_total_pats', default=3,
help='Number of available PAT addresses for bastions'),
cfg.IntOpt('num_pat_bastions_per_server', default=2,
@ -87,6 +90,7 @@ auth = v3.Password(auth_url=CONF.tatu.auth_url,
password=CONF.tatu.password,
project_id=CONF.tatu.project_id)
session = keystone_session.Session(auth=auth)
KEYSTONE = keystone_client.Client(session=session)
NOVA = nova_client.Client('2', session=session)
NEUTRON = neutron_client.Client(session=session)
DESIGNATE = designate_client.Client(session=session)

View File

@ -11,7 +11,7 @@
# under the License.
from Crypto.PublicKey import RSA
from datetime import datetime
from datetime import datetime, timedelta
import falcon
import sqlalchemy as sa
from sqlalchemy.exc import IntegrityError
@ -19,7 +19,8 @@ from sqlalchemy.ext.declarative import declarative_base
import sshpubkeys
from tatu.castellano import get_secret, store_secret
from tatu.utils import generateCert, revokedKeysBase64, random_uuid
from tatu.ks_utils import getProjectRoleNamesForUser
from tatu.utils import canonical_uuid_string, generateCert, revokedKeysBase64, random_uuid
Base = declarative_base()
@ -28,8 +29,11 @@ class Authority(Base):
__tablename__ = 'authorities'
auth_id = sa.Column(sa.String(36), primary_key=True)
name = sa.Column(sa.String(36))
user_key = sa.Column(sa.Text)
host_key = sa.Column(sa.Text)
user_pub_key = sa.Column(sa.Text)
host_pub_key = sa.Column(sa.Text)
def getAuthority(session, auth_id):
@ -40,22 +44,28 @@ def getAuthorities(session):
return session.query(Authority)
def getAuthUserKey(auth):
return get_secret(auth.user_key)
def getAuthHostKey(auth):
return get_secret(auth.host_key)
return
def createAuthority(session, auth_id):
user_key = RSA.generate(2048).exportKey('PEM')
def _newPubPrivKeyPair():
    """Generate a fresh 2048-bit RSA key pair.

    Returns a (public, private) tuple: the public half in OpenSSH
    format, the private half as PEM.
    """
    key = RSA.generate(2048)
    public = key.publickey().exportKey('OpenSSH')
    private = key.exportKey('PEM')
    return public, private
def createAuthority(session, auth_id, name):
user_pub_key, user_key = _newPubPrivKeyPair()
user_secret_id = store_secret(user_key)
host_key = RSA.generate(2048).exportKey('PEM')
host_pub_key, host_key = _newPubPrivKeyPair()
host_secret_id = store_secret(host_key)
auth = Authority(auth_id=auth_id,
auth = Authority(
auth_id=auth_id,
name=name,
user_key=user_secret_id,
host_key=host_secret_id)
host_key=host_secret_id,
user_pub_key = user_pub_key,
host_pub_key = host_pub_key,
)
session.add(auth)
try:
session.commit()
@ -64,10 +74,20 @@ def createAuthority(session, auth_id):
return auth
def deleteAuthority(session, auth_id):
    """Delete the Authority (CA) identified by auth_id.

    A missing authority is treated as already deleted (no-op) instead of
    passing None into session.delete(), which would raise from deep
    inside SQLAlchemy.
    """
    auth = getAuthority(session, auth_id)
    if auth is None:
        return
    session.delete(auth)
    session.commit()
class UserCert(Base):
__tablename__ = 'user_certs'
serial = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
user_name = sa.Column(sa.String(20))
principals = sa.Column(sa.String(100))
created_at = sa.Column(sa.DateTime, default=lambda: datetime.utcnow())
expires_at = sa.Column(sa.DateTime, default=lambda: datetime.utcnow()
+ timedelta(days=365))
user_id = sa.Column(sa.String(36))
fingerprint = sa.Column(sa.String(60))
auth_id = sa.Column(sa.String(36), sa.ForeignKey('authorities.auth_id'))
@ -94,7 +114,7 @@ def getUserCerts(session):
return session.query(UserCert)
def createUserCert(session, user_id, auth_id, pub):
def createUserCert(session, user_id, user_name, auth_id, pub):
# Retrieve the authority's private key and generate the certificate
auth = getAuthority(session, auth_id)
if auth is None:
@ -104,15 +124,20 @@ def createUserCert(session, user_id, auth_id, pub):
certRecord = getUserCert(session, user_id, fingerprint)
if certRecord is not None:
return certRecord
principals = getProjectRoleNamesForUser(auth_id, user_id)
user = UserCert(
user_id=user_id,
user_name=user_name,
principals=','.join(principals),
fingerprint=fingerprint,
auth_id=auth_id,
)
session.add(user)
session.flush()
user.cert = generateCert(getAuthUserKey(auth), pub, user=True,
principals='admin,root', serial=user.serial)
user.cert = generateCert(
get_secret(auth.user_key), pub, user=True, principal_list=principals,
serial=user.serial, days_valid=365, identity=user_name
)
if user.cert is None:
raise falcon.HTTPInternalServerError(
"Failed to generate the certificate")
@ -136,9 +161,32 @@ def getRevokedKeysBase64(session, auth_id):
description='No Authority found with that ID')
serials = [k.serial for k in session.query(RevokedKey).filter(
RevokedKey.auth_id == auth_id)]
user_key = RSA.importKey(getAuthUserKey(auth))
user_pub_key = user_key.publickey().exportKey('OpenSSH')
return revokedKeysBase64(user_pub_key, serials)
return revokedKeysBase64(auth.user_pub_key, serials)
def revokeUserCert(session, cert):
    """Mark one user certificate revoked and record its serial number.

    The authority's revocation list is derived from RevokedKey rows, so a
    row is added alongside flipping the cert's revoked flag.
    """
    cert.revoked = True
    session.add(cert)
    # Bug fix: this module *is* tatu.db.models, so there is no 'db' name
    # in scope here -- 'db.RevokedKey' raised NameError. Use RevokedKey
    # directly, matching revokeUserCerts() below.
    session.add(RevokedKey(cert.auth_id, serial=cert.serial))
    session.commit()
def revokeUserCerts(session, user_id):
    """Revoke every certificate issued to user_id, across all projects."""
    # TODO(Pino): send an SQL statement instead of retrieving and iterating?
    certs = session.query(UserCert).filter(UserCert.user_id == user_id)
    for cert in certs:
        cert.revoked = True
        session.add(cert)
        session.add(RevokedKey(cert.auth_id, serial=cert.serial))
    session.commit()
def revokeUserCertsInProject(session, user_id, project_id):
    """Revoke user_id's certificates issued by one project's authority."""
    # TODO(Pino): send an SQL statement instead of retrieving and iterating?
    query = (session.query(UserCert)
             .filter(UserCert.user_id == user_id)
             .filter(UserCert.auth_id == project_id))
    for cert in query:
        cert.revoked = True
        session.add(cert)
        session.add(RevokedKey(cert.auth_id, serial=cert.serial))
    session.commit()
def revokeUserKey(session, auth_id, serial=None, key_id=None, cert=None):
@ -156,12 +204,6 @@ def revokeUserKey(session, auth_id, serial=None, key_id=None, cert=None):
raise falcon.HTTPBadRequest(
"Incorrect CA ID for serial # {}".format(serial))
ser = serial
elif key_id is not None:
# TODO(pino): look up the UserCert by key id and get the serial number
pass
elif cert is not None:
# TODO(pino): Decode the cert, validate against UserCert and get serial
pass
if ser is None or userCert is None:
raise falcon.HTTPBadRequest("Cannot identify which Cert to revoke.")
@ -208,11 +250,47 @@ def createToken(session, host_id, auth_id, hostname):
return token
class Host(Base):
    """DB model: one row per SSH-enabled server (Nova instance)."""
    __tablename__ = 'hosts'
    id = sa.Column(sa.String(36), primary_key=True)  # Nova instance ID
    name = sa.Column(sa.String(36))  # instance hostname
    pat_bastions = sa.Column(sa.String(70))  # max 3 ip:port pairs
    srv_url = sa.Column(sa.String(100))  # _ssh._tcp.<host>.<project>.<zone>
def createHost(session, id, name, pat_bastions, srv_url):
    """Persist a new Host row; raise HTTPConflict on duplicate ID."""
    record = Host(id=id, name=name, pat_bastions=pat_bastions,
                  srv_url=srv_url)
    session.add(record)
    try:
        session.commit()
    except IntegrityError:
        raise falcon.HTTPConflict(
            "Failed to create SSH host record for {}.".format(name))
    return record
def getHost(session, id):
    """Fetch a Host by primary key; None when absent."""
    query = session.query(Host)
    return query.get(id)
def getHosts(session):
    """Return a SQLAlchemy query over all Host rows (lazily evaluated)."""
    return session.query(Host)
def deleteHost(session, host):
    """Delete the given Host row and commit immediately."""
    session.delete(host)
    session.commit()
class HostCert(Base):
__tablename__ = 'host_certs'
host_id = sa.Column(sa.String(36), primary_key=True)
fingerprint = sa.Column(sa.String(60), primary_key=True)
created_at = sa.Column(sa.DateTime, default=lambda: datetime.utcnow())
expires_at = sa.Column(sa.DateTime, default=lambda: datetime.utcnow()
+ timedelta(days=365))
auth_id = sa.Column(sa.String(36), sa.ForeignKey('authorities.auth_id'))
token_id = sa.Column(sa.String(36), sa.ForeignKey('tokens.token_id'))
pubkey = sa.Column(sa.Text)
@ -258,7 +336,8 @@ def createHostCert(session, token_id, host_id, pub):
certRecord = session.query(HostCert).get([host_id, fingerprint])
if certRecord is not None:
raise falcon.HTTPConflict('This public key is already signed.')
cert = generateCert(getAuthHostKey(auth), pub, user=False)
cert = generateCert(get_secret(auth.host_key), pub, user=False,
days_valid=365, identity=token.hostname)
if cert == '':
raise falcon.HTTPInternalServerError(
"Failed to generate the certificate")

View File

@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from designateclient.exceptions import Conflict
from oslo_log import log as logging
@ -48,24 +47,30 @@ def sync_bastions(ip_addresses):
register_bastion(ip)
def get_srv_url(hostname, project_id):
return '_ssh._tcp.{}.{}.{}'.format(hostname, project_id[:8], ZONE['name'])
def get_srv_url(hostname, project):
    """Build the DNS SRV record name: _ssh._tcp.<host>.<project>.<zone>."""
    return '_ssh._tcp.%s.%s.%s' % (hostname, project, ZONE['name'])
def add_srv_records(hostname, project_id, port_ip_tuples):
def delete_srv_records(srv_url):
    """Best-effort removal of a host's SRV recordset from Designate.

    Failures (recordset already gone, Designate briefly unreachable) are
    intentionally ignored: DNS cleanup must not block host teardown.
    """
    try:
        DESIGNATE.recordsets.delete(ZONE['id'], srv_url)
    except Exception:
        # Bug fix: the bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior
        # without masking process-control signals.
        pass
def add_srv_records(hostname, project, ip_port_tuples):
records = []
for port, ip in port_ip_tuples:
for ip, port in ip_port_tuples:
bastion = bastion_name_from_ip(ip)
# SRV record format is: priority weight port A-name
records.append(
'10 50 {} {}'.format(port, bastion))
srv_url = get_srv_url(hostname, project)
try:
DESIGNATE.recordsets.create(ZONE['id'],
get_srv_url(hostname, project_id),
'SRV', records)
DESIGNATE.recordsets.create(ZONE['id'], srv_url, 'SRV', records)
except Conflict:
pass
return srv_url
_setup_zone()

46
tatu/ks_utils.py Normal file
View File

@ -0,0 +1,46 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tatu.config import KEYSTONE as ks
LOG = logging.getLogger(__name__)
def getProjectRoleNames(proj_id):
    """Return the names of all roles assigned (to anyone) in a project."""
    unique_ids = {assignment.role['id']
                  for assignment in ks.role_assignments.list(project=proj_id)}
    return getRoleNamesForIDs(list(unique_ids))
def getProjectRoleNamesForUser(proj_id, user_id):
    """Return the effective role names held by a user within a project."""
    assignments = ks.role_assignments.list(user=user_id, project=proj_id,
                                           effective=True)
    return getRoleNamesForIDs([a.role['id'] for a in assignments])
def getRoleNamesForIDs(ids):
    """Resolve a list of keystone role IDs into role names."""
    # TODO(pino): use a cache? Each lookup is a keystone API round trip.
    # (Loop variable renamed from 'id', which shadowed the builtin.)
    return [ks.roles.get(role_id).name for role_id in ids]
def getUserNameForID(id):
    """Return the keystone user name for the given user ID."""
    user = ks.users.get(id)
    return user.name
def getProjectNameForID(id):
    """Return the keystone project name for the given project ID."""
    project = ks.projects.get(id)
    return project.name
def getUserIdsByGroupId(id):
    """List the IDs of all users belonging to the given keystone group."""
    members = ks.users.list(group=id)
    return [member.id for member in members]

View File

@ -10,32 +10,38 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import sys
from tatu import config # sets up all required config
import time
import uuid
from tatu.db.models import createAuthority
from tatu.db.persistence import get_url
from tatu import ks_utils
from tatu.config import CONF
from tatu.config import KEYSTONE as ks
from tatu.config import NOVA as nova
from tatu.db import models as db
from tatu.dns import delete_srv_records
from tatu.pat import deletePatEntries, string_to_ip_port_tuples
from tatu.utils import canonical_uuid_string
LOG = logging.getLogger(__name__)
class NotificationEndpoint(object):
filter_rule = oslo_messaging.NotificationFilter(
publisher_id='^identity.*',
event_type='^identity.project.created')
publisher_id='^identity.*|^compute.*',
event_type='^identity.project.(created|deleted)|'
'^identity.user.deleted|'
'^identity.role_assignment.deleted|'
'^compute.instance.delete.end')
#TODO(pino): what about user removal from a project? (rather than deletion)
def __init__(self):
self.engine = create_engine(get_url())
# Base.metadata.create_all(self.engine)
self.Session = scoped_session(sessionmaker(self.engine))
def __init__(self, engine):
self.Session = scoped_session(sessionmaker(engine))
def info(self, ctxt, publisher_id, event_type, payload, metadata):
LOG.debug('notification:')
@ -44,37 +50,169 @@ class NotificationEndpoint(object):
LOG.debug("publisher: %s, event: %s, metadata: %s", publisher_id,
event_type, metadata)
if event_type == 'identity.project.created':
proj_id = payload.get('resource_info')
LOG.debug("New project with ID {} created "
"in Keystone".format(proj_id))
se = self.Session()
if event_type == 'identity.project.created':
proj_id = canonical_uuid_string(payload.get('resource_info'))
name = ks_utils.getProjectNameForID(proj_id)
_createAuthority(self.Session, proj_id, name)
elif event_type == 'identity.project.deleted':
# Assume all the users and instances must have been removed.
proj_id = canonical_uuid_string(payload.get('resource_info'))
_deleteAuthority(self.Session,
db.getAuthority(self.Session(), proj_id))
elif event_type == 'identity.role_assignment.deleted':
users = []
if 'user' in payload:
users = [payload['user']]
else:
users = ks_utils.getUserIdsByGroupId(payload['group'])
# TODO: look for domain if project isn't available
proj_id = payload['project']
for user_id in users:
try:
auth_id = str(uuid.UUID(proj_id, version=4))
createAuthority(se, auth_id)
db.revokeUserCertsInProject(se, user_id, proj_id)
except Exception as e:
LOG.error(
"Failed to create Tatu CA for new project with ID {} "
"due to exception {}".format(proj_id, e))
"Failed to revoke user {} certificates in project {} "
"after role assignment change, due to exception {}"
.format(user_id, proj_id, e))
se.rollback()
self.Session.remove()
elif event_type == 'identity.user.deleted':
user_id = payload.get('resource_info')
LOG.debug("User with ID {} deleted "
"in Keystone".format(user_id))
try:
db.revokeUserCerts(se, user_id)
# TODO(pino): also prevent generating new certs for this user?
except Exception as e:
LOG.error(
"Failed to revoke all certs for deleted user with ID {} "
"due to exception {}".format(user_id, e))
se.rollback()
self.Session.remove()
elif event_type == 'compute.instance.delete.end':
instance_id = payload.get('instance_id')
host = db.getHost(se, instance_id)
if host is not None:
_deleteHost(self.Session, host)
# TODO(Pino): record the deletion to prevent new certs generation?
pass
else:
LOG.error("Status update or unknown")
LOG.error("Unknown update.")
# TODO(pino): listen to host delete notifications, clean up PATs and DNS
# TODO(pino): Listen to user deletions and revoke their certs
def _createAuthority(session_factory, auth_id, name):
    """Create a CA for the given project unless one already exists."""
    session = session_factory()
    if db.getAuthority(session, auth_id) is not None:
        return
    try:
        db.createAuthority(session, auth_id, name)
        LOG.info("Created CA for project {} with ID {}".format(name, auth_id))
    except Exception as e:
        LOG.error(
            "Failed to create CA for project {} with ID {} "
            "due to exception {}".format(name, auth_id, e))
        session.rollback()
    # Release the scoped session so the next caller gets a fresh one.
    session_factory.remove()
def _deleteAuthority(session_factory, auth):
    """Delete a CA whose backing project no longer exists in Keystone."""
    se = session_factory()
    try:
        LOG.info(
            "Deleting CA for project {} with ID {} - not in Keystone"
            .format(auth.name, auth.auth_id))
        db.deleteAuthority(se, auth.auth_id)
    except Exception as e:
        # Bug fix: the original error path referenced undefined names
        # 'proj' and 'auth_id', raising NameError while handling the
        # failure. Use the 'auth' row we were given.
        LOG.error(
            "Failed to delete Tatu CA for project {} with ID {} "
            "due to exception {}".format(auth.name, auth.auth_id, e))
        se.rollback()
    session_factory.remove()
def _deleteHost(session_factory, host):
    """Remove a Host row plus its external DNS (SRV) and PAT resources."""
    LOG.debug("Clean up DNS and PAT for deleted instance {} with ID {}"
              .format(host.name, host.id))
    delete_srv_records(host.srv_url)
    deletePatEntries(string_to_ip_port_tuples(host.pat_bastions))
    se = session_factory()
    try:
        # Bug fix: log text said "not in Keystone", copy-pasted from the
        # authority path -- hosts are reconciled against Nova.
        LOG.info(
            "Deleting Host {} with ID {} - instance no longer in Nova"
            .format(host.name, host.id))
        se.delete(host)
        se.commit()
    except Exception:
        # Bug fix: bare 'except:' also swallowed SystemExit/KeyboardInterrupt.
        LOG.error(
            "Failed to delete Host {} with ID {}"
            .format(host.name, host.id))
        se.rollback()
    session_factory.remove()
def sync(engine):
    """Reconcile Tatu's database with the current state of Keystone and Nova.

    Run once at daemon startup to catch events missed while down:
    * create CAs for new Keystone projects,
    * drop CAs whose projects are gone,
    * revoke certificates of deleted users or users who lost a role,
    * clean up DNS/PAT state for deleted Nova servers.
    """
    session_factory = scoped_session(sessionmaker(engine))

    LOG.info("Add CAs for new projects in Keystone.")
    ks_project_ids = set()
    for proj in ks.projects.list():
        proj_id = canonical_uuid_string(proj.id)
        ks_project_ids.add(proj_id)
        _createAuthority(session_factory, proj_id, proj.name)

    # Iterate through all CAs in Tatu. Delete any that don't have a
    # corresponding project in Keystone.
    LOG.info("Remove CAs for projects that were deleted from Keystone.")
    for auth in db.getAuthorities(session_factory()):
        if auth.auth_id not in ks_project_ids:
            _deleteAuthority(session_factory, auth)

    ks_user_ids = set(user.id for user in ks.users.list())
    LOG.info("Revoke user certificates if user was deleted or lost a role.")
    for cert in db.getUserCerts(session_factory()):
        # Invalidate the cert if the user was removed from Keystone.
        if cert.user_id not in ks_user_ids:
            # Bug fix: revokeUserCert() requires the session as its first
            # argument; the original call omitted it.
            db.revokeUserCert(session_factory(), cert)
            continue
        # Invalidate the cert if it carries any principal (role name) the
        # user no longer holds in the project.
        current = ks_utils.getProjectRoleNamesForUser(cert.auth_id,
                                                      cert.user_id)
        if set(cert.principals.split(",")) - set(current):
            db.revokeUserCert(session_factory(), cert)

    # Iterate through all the instance IDs in Tatu. Clean up DNS and PAT
    # for any that no longer exist in Nova.
    LOG.info("Delete DNS and PAT resources of any server that was deleted.")
    live_ids = set(srv.id for srv in
                   nova.servers.list(search_opts={'all_tenants': True}))
    for host in db.getHosts(session_factory()):
        if host.id not in live_ids:
            _deleteHost(session_factory, host)
def main():
transport = oslo_messaging.get_notification_transport(cfg.CONF)
transport = oslo_messaging.get_notification_transport(CONF)
targets = [oslo_messaging.Target(topic='notifications')]
endpoints = [NotificationEndpoint()]
storage_engine = create_engine(CONF.tatu.sqlalchemy_engine)
endpoints = [NotificationEndpoint(storage_engine)]
server = oslo_messaging.get_notification_listener(transport,
targets,
endpoints,
executor='threading')
# At startup, do an overall sync.
sync(storage_engine)
LOG.info("Starting notification watcher daemon")
server.start()
try:

View File

@ -20,10 +20,17 @@ import random
from tatu import dns
from tatu.config import CONF, NEUTRON, NOVA, DRAGONFLOW
from tatu.db import models as tatu_db
from tatu.utils import dash_uuid
LOG = logging.getLogger(__name__)
#TODO(pino): periodically refresh this list
PATS = DRAGONFLOW.get_all(PAT)
def getAllPats():
    """Return all PAT objects from the Dragonflow NB database."""
    return DRAGONFLOW.get_all(PAT)
def _sync_pats():
# TODO(pino): re-bind PATs when hypervisors fail (here and on notification)
all_chassis = DRAGONFLOW.get_all(Chassis)
@ -84,34 +91,27 @@ def _df_find_lrouter_by_lport(lport):
return None
def get_port_ip_tuples(instance_id, fixed_lport):
port_ip_tuples = []
all_entries = DRAGONFLOW.get_all(PATEntry)
LOG.debug('Found {} PATEntries: {}'.format(len(all_entries), all_entries))
try:
server = NOVA.servers.get(instance_id)
except:
return []
ifaces = server.interface_list()
for iface in ifaces:
lport = DRAGONFLOW.get(LogicalPort(id=iface.port_id))
lrouter = _df_find_lrouter_by_lport(lport)
if lrouter is None: continue
for entry in all_entries:
if entry.lport.id == lport.id and entry.fixed_l4_port == fixed_lport:
pat = DRAGONFLOW.get(PAT(id=entry.pat.id))
port_ip_tuples.append((entry.pat_l4_port, str(pat.ip_address)))
if port_ip_tuples: break
return port_ip_tuples
def ip_port_tuples_to_string(tuples):
    """Render (ip, port) tuples as 'ip:port,ip:port,...'."""
    rendered = ['{0}:{1}'.format(pair[0], pair[1]) for pair in tuples]
    return ','.join(rendered)
def create_pat_entries(sql_session, instance_id, fixed_l4_port,
def string_to_ip_port_tuples(s):
    """Parse 'ip:port,ip:port,...' into a list of (ip, port) string tuples.

    Bug fix: an empty/blank string now yields [] instead of [('',)] --
    hosts created while PAT bastions are disabled store pat_bastions=''.
    Note the ports come back as strings, mirroring the stored text.
    """
    if not s:
        return []
    return [tuple(ip_port.split(':')) for ip_port in s.split(',')]
def deletePatEntries(ip_port_tuples):
    """Delete the Dragonflow PATEntry objects matching the given tuples."""
    LOG.debug("Delete PATEntry for each of tuples: {}".format(ip_port_tuples))
    # NOTE(review): entries are matched on (pat.id, pat_l4_port) while the
    # input tuples are (ip, l4_port) strings -- confirm pat.id is the IP
    # address and that the port types (str vs int) line up, otherwise no
    # entry ever matches. TODO verify against the Dragonflow PAT model.
    wanted = set(ip_port_tuples)
    for entry in DRAGONFLOW.get_all(PATEntry):
        if (entry.pat.id, entry.pat_l4_port) in wanted:
            DRAGONFLOW.delete(entry)
def create_pat_entries(sql_session, instance_id,
fixed_l4_port=CONF.tatu.ssh_port,
num=CONF.tatu.num_pat_bastions_per_server):
port_ip_tuples = get_port_ip_tuples(instance_id, fixed_l4_port)
LOG.debug('Found {} tuples: {}'.format(len(port_ip_tuples), port_ip_tuples))
if port_ip_tuples: return port_ip_tuples
LOG.debug('Creating new tuples.')
server = NOVA.servers.get(instance_id)
ip_port_tuples = []
server = NOVA.servers.get(dash_uuid(instance_id))
ifaces = server.interface_list()
for iface in ifaces:
lport = DRAGONFLOW.get(LogicalPort(id=iface.port_id))
@ -135,10 +135,10 @@ def create_pat_entries(sql_session, instance_id, fixed_l4_port,
lrouter = lrouter,
)
DRAGONFLOW.create(pat_entry)
port_ip_tuples.append((pat_l4_port, str(pat.ip_address)))
ip_port_tuples.append((str(pat.ip_address), pat_l4_port))
# if we got here, we now have the required pat_entries
break
return port_ip_tuples
return ip_port_tuples
_sync_pats()

View File

@ -1,53 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1.identity import v3 as ks_v3
from keystoneauth1 import session as ks_session
from keystoneclient.v3 import client as ks_client_v3
from oslo_log import log as logging
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from tatu import config # sets up all required config
from tatu.db.models import Base, createAuthority, getAuthority
from tatu.db.persistence import get_url
import uuid
LOG = logging.getLogger(__name__)
auth = ks_v3.Password(auth_url='http://localhost/identity/v3',
user_id='fab01a1f2a7749b78a53dffe441a1879',
password='pinot',
project_id='2e6c998ad16f4045821304470a57d160')
keystone = ks_client_v3.Client(session=ks_session.Session(auth=auth))
projects = keystone.projects.list()
engine = create_engine(get_url())
Base.metadata.create_all(engine)
Session = scoped_session(sessionmaker(engine))
LOG.debug("Creating CAs for {} Keystone projects.".format(len(projects)))
for proj in projects:
se = Session()
try:
auth_id = str(uuid.UUID(proj.id, version=4))
if getAuthority(se, auth_id) is None:
createAuthority(se, auth_id)
LOG.info("Created CA for project {} with ID {}".format(proj.name,
auth_id))
else:
LOG.info("CA already exists for project {}".format(proj.name))
except Exception as e:
LOG.error(
"Failed to create Tatu CA for project {} with ID {} "
"due to exception {}".format(proj.name, auth_id, e))
se.rollback()
Session.remove()

View File

@ -19,10 +19,23 @@ from tempfile import mkdtemp
def random_uuid():
return str(uuid.uuid4())
return uuid.uuid4().hex
def generateCert(auth_key, entity_key, user=True, principals='root', serial=0):
def dash_uuid(id):
    """Return the canonical dashed string form of a UUID given in any
    representation accepted by uuid.UUID (e.g. 32-char hex)."""
    parsed = uuid.UUID(id, version=4)
    return str(parsed)
def canonical_uuid_string(id):
    """Normalize any accepted UUID representation to 32-char hex, no dashes."""
    parsed = uuid.UUID(id, version=4)
    return parsed.hex
def datetime_to_string(dt):
    """Format a datetime as 'YYYY-mm-ddTHH:MM:SS.ffffff'.

    Unlike datetime.isoformat(), the microseconds field is always present,
    even when it is zero.
    """
    return format(dt, '%Y-%m-%dT%H:%M:%S.%f')
def generateCert(auth_key, entity_key, user=True, principal_list=[], serial=0,
days_valid=180, identity=''):
# Temporarily write the authority private key, entity public key to files
temp_dir = mkdtemp()
ca_file = '/'.join([temp_dir, 'ca_key'])
@ -36,16 +49,21 @@ def generateCert(auth_key, entity_key, user=True, principals='root', serial=0):
text_file.write(auth_key)
with open(pub_file, "w", 0o644) as text_file:
text_file.write(entity_key)
args = ['ssh-keygen', '-s', ca_file, '-I', 'testID', '-V',
'-1d:+365d', '-z', str(serial)]
if user:
args.extend(['-n', principals, pub_file])
else:
args.extend(['-h', pub_file])
args = ['ssh-keygen', '-s', ca_file,
'-V', '-1d:+{}d'.format(days_valid)]
if serial:
args.extend(['-z', str(serial)])
if identity:
args.extend(['-I', '{}_{}'.format(identity, serial)])
if principal_list:
args.extend(['-n', ','.join(principal_list)])
if not user:
args.append('-h')
args.append(pub_file)
subprocess.check_output(args, stderr=subprocess.STDOUT)
# Read the contents of the certificate file
with open(cert_file, 'r') as text_file:
cert = text_file.read()
cert = text_file.read().strip('\n')
finally:
shutil.rmtree(temp_dir)
return cert