Add support for deploying Kafka

Projects which may use Kafka include Monasca and Oslo messaging.
Initially it will be possible to deploy a single Kafka cluster which
may be shared among clients. Support for running multiple Kafka
clusters may be added in a future change. This change also configures
collection of the Kafka server, state-change and controller logs.

Partially-Implements: blueprint monasca-roles
Change-Id: Iab8d200c2450238f3c0c708d1f4184490f6e6284
Doug Szumski 2018-02-26 12:02:19 +00:00
parent f249a1b39a
commit 6647ed818a
24 changed files with 248 additions and 1 deletion
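
As context for the diff below: to pick up this feature an operator only needs to flip the new flag in /etc/kolla/globals.yml; Zookeeper is brought in automatically because enable_zookeeper now defaults to follow enable_kafka, and the broker listens on kafka_port (9092). A minimal sketch:

    # /etc/kolla/globals.yml
    enable_kafka: "yes"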

@@ -210,6 +210,8 @@ ironic_inspector_port: "5050"
iscsi_port: "3260"
kafka_port: "9092"
karbor_api_port: "8799"
keystone_public_port: "5000"
@@ -441,6 +443,7 @@ enable_ironic: "no"
enable_ironic_pxe_uefi: "no"
enable_iscsid: "{{ (enable_cinder | bool and enable_cinder_backend_iscsi | bool) or enable_ironic | bool }}"
enable_karbor: "no"
enable_kafka: "no"
enable_kuryr: "no"
enable_magnum: "no"
enable_manila: "no"
@@ -487,7 +490,7 @@ enable_trove_singletenant: "no"
enable_vitrage: "no"
enable_vmtp: "no"
enable_watcher: "no"
-enable_zookeeper: "no"
+enable_zookeeper: "{{ enable_kafka | bool }}"
enable_zun: "no"
ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}"

@@ -56,6 +56,9 @@ monitoring
control
compute

[kafka:children]
control

[karbor:children]
control

@@ -78,6 +78,9 @@ compute
[influxdb:children]
monitoring

[kafka:children]
control

[karbor:children]
control

@@ -52,6 +52,7 @@
    - "04-openstack-wsgi"
    - "05-libvirt"
    - "06-zookeeper"
    - "07-kafka"
  notify:
    - Restart fluentd container
@@ -193,6 +194,7 @@
    - { name: "ironic", enabled: "{{ enable_ironic }}" }
    - { name: "ironic-inspector", enabled: "{{ enable_ironic }}" }
    - { name: "iscsid", enabled: "{{ enable_iscsid }}" }
    - { name: "kafka", enabled: "{{ enable_kafka }}" }
    - { name: "karbor", enabled: "{{ enable_karbor }}" }
    - { name: "keepalived", enabled: "{{ enable_haproxy }}" }
    - { name: "keystone", enabled: "{{ enable_keystone }}" }

@@ -0,0 +1,11 @@
{% set fluentd_dir = 'td-agent' if kolla_base_distro in ['ubuntu', 'debian'] else 'fluentd' %}
<source>
    @type tail
    path /var/log/kolla/kafka/controller.log, /var/log/kolla/kafka/server.log, /var/log/kolla/kafka/state-change.log
    pos_file /var/run/{{ fluentd_dir }}/kafka.pos
    tag infra.*
    format multiline
    format_firstline /^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}\] \S+ .*$/
    format1 /^\[(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\] (?<log_level>\S+) (?<Payload>.*)$/
    time_key Timestamp
</source>
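
For reference, the multiline parser above matches Kafka's default log4j layout; a hypothetical line that format_firstline would accept (and from which Timestamp, log_level and Payload are captured) looks like:

    [2018-02-26 12:02:19,123] INFO [KafkaServer id=1001] started (kafka.server.KafkaServer)

Continuation lines such as stack traces do not match the pattern and are folded into the preceding event.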

@@ -0,0 +1,3 @@
"/var/log/kolla/kafka/*.log"
{
}

@@ -24,6 +24,7 @@
( 'ironic', enable_ironic ),
( 'ironic-inspector', enable_ironic ),
( 'iscsid', enable_iscsid ),
( 'kafka', enable_kafka ),
( 'karbor', enable_karbor ),
( 'keepalived', enable_haproxy ),
( 'keystone', enable_keystone ),

@@ -53,6 +53,12 @@
            "owner": "{{ fluentd_user }}",
            "perm": "0600"
        },
        {
            "source": "{{ container_config_directory }}/input/07-kafka.conf",
            "dest": "{{ fluentd_dir }}/input/07-kafka.conf",
            "owner": "{{ fluentd_user }}",
            "perm": "0600"
        },
        {# Copy all configuration files in filter/ directory to include #}
        {# custom filter configs. #}
        {

@@ -0,0 +1,31 @@
---
kafka_services:
  kafka:
    container_name: kafka
    group: kafka
    enabled: true
    image: "{{ kafka_image_full }}"
    environment:
      LOG_DIR: "{{ kafka_log_dir }}"
      KAFKA_HEAP_OPTS: "{{ kafka_heap_opts }}"
    volumes:
      - "{{ node_config_directory }}/kafka/:{{ container_config_directory }}/"
      - "/etc/localtime:/etc/localtime:ro"
      - "kafka:/var/lib/kafka/data"
      - "kolla_logs:/var/log/kolla/"

####################
# Kafka
####################
kafka_cluster_name: "kolla_kafka"
kafka_log_dir: "/var/log/kolla/kafka"
kafka_heap_opts: "-Xmx1G -Xms1G"
kafka_zookeeper: "{% for host in groups['zookeeper'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ zookeeper_client_port }}{% if not loop.last %},{% endif %}{% endfor %}"

####################
# Docker
####################
kafka_install_type: "{{ kolla_install_type }}"
kafka_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kafka_install_type }}-kafka"
kafka_tag: "{{ openstack_release }}"
kafka_image_full: "{{ kafka_image }}:{{ kafka_tag }}"

@@ -0,0 +1,22 @@
---
- name: Restart kafka container
  vars:
    service_name: "kafka"
    service: "{{ kafka_services[service_name] }}"
    config_json: "{{ kafka_config_jsons.results|selectattr('item.key', 'equalto', service_name)|first }}"
    kafka_conf: "{{ kafka_confs.results|selectattr('item.key', 'equalto', service_name)|first }}"
    kafka_container: "{{ check_kafka_containers.results|selectattr('item.key', 'equalto', service_name)|first }}"
  kolla_docker:
    action: "recreate_or_restart_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ service.container_name }}"
    image: "{{ service.image }}"
    environment: "{{ service.environment }}"
    volumes: "{{ service.volumes }}"
  when:
    - action != "config"
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
    - config_json.changed | bool
      or kafka_conf.changed | bool
      or kafka_container.changed | bool

@@ -0,0 +1,3 @@
---
dependencies:
  - { role: common }

@@ -0,0 +1 @@
---

@@ -0,0 +1,62 @@
---
- name: Ensuring config directories exist
  file:
    path: "{{ node_config_directory }}/{{ item.key }}"
    state: "directory"
    owner: "{{ config_owner_user }}"
    group: "{{ config_owner_group }}"
    mode: "0770"
    recurse: yes
  become: true
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ kafka_services }}"

- name: Copying over config.json files for services
  template:
    src: "{{ item.key }}.json.j2"
    dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
    mode: "0660"
  become: true
  register: kafka_config_jsons
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ kafka_services }}"
  notify:
    - Restart kafka container

- name: Copying over kafka config
  merge_configs:
    sources:
      - "{{ role_path }}/templates/kafka.server.properties.j2"
      - "{{ node_custom_config }}/kafka.server.properties"
      - "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/kafka.server.properties"
    dest: "{{ node_config_directory }}/{{ item.key }}/kafka.server.properties"
    mode: "0660"
  become: true
  register: kafka_confs
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ kafka_services }}"
  notify:
    - Restart kafka container

- name: Check kafka containers
  kolla_docker:
    action: "compare_container"
    common_options: "{{ docker_common_options }}"
    name: "{{ item.value.container_name }}"
    image: "{{ item.value.image }}"
    volumes: "{{ item.value.volumes }}"
    environment: "{{ item.value.environment }}"
  register: check_kafka_containers
  when:
    - action != "config"
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ kafka_services }}"
  notify:
    - Restart kafka container

@@ -0,0 +1,5 @@
---
- include: config.yml

- name: Flush handlers
  meta: flush_handlers

@@ -0,0 +1,2 @@
---
- include: "{{ action }}.yml"

@@ -0,0 +1,17 @@
---
- name: Get container facts
  kolla_container_facts:
    name:
      - kafka
  register: container_facts

- name: Checking free port for Kafka
  wait_for:
    host: "{{ api_interface_address }}"
    port: "{{ kafka_port }}"
    connect_timeout: 1
    timeout: 1
    state: stopped
  when:
    - container_facts['kafka'] is not defined
    - inventory_hostname in groups['kafka']

@@ -0,0 +1,10 @@
---
- name: Pulling kafka images
  kolla_docker:
    action: "pull_image"
    common_options: "{{ docker_common_options }}"
    image: "{{ item.value.image }}"
  when:
    - inventory_hostname in groups[item.value.group]
    - item.value.enabled | bool
  with_dict: "{{ kafka_services }}"

@@ -0,0 +1,2 @@
---
- include: deploy.yml

@@ -0,0 +1,5 @@
---
- include: config.yml

- name: Flush handlers
  meta: flush_handlers

@@ -0,0 +1,23 @@
{
    "command": "/opt/kafka/bin/kafka-server-start.sh /etc/kafka/kafka.server.properties",
    "config_files": [
        {
            "source": "{{ container_config_directory }}/kafka.server.properties",
            "dest": "/etc/kafka/kafka.server.properties",
            "owner": "kafka",
            "perm": "0600"
        }
    ],
    "permissions": [
        {
            "path": "/var/lib/kafka",
            "owner": "kafka:kafka",
            "recurse": true
        },
        {
            "path": "/var/log/kolla/kafka",
            "owner": "kafka:kafka",
            "recurse": true
        }
    ]
}

@@ -0,0 +1,17 @@
listeners=PLAINTEXT://{{ api_interface_address }}:{{ kafka_port }}
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/lib/kafka/data
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=3
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect={{ kafka_zookeeper }}
zookeeper.connection.timeout.ms=6000
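
As a worked example, with three controllers whose API addresses are the hypothetical 192.0.2.1-192.0.2.3 and assuming the Zookeeper role's default client port of 2181, the template above would render roughly as:

    # rendered kafka.server.properties on the first controller (illustrative values only)
    listeners=PLAINTEXT://192.0.2.1:9092
    zookeeper.connect=192.0.2.1:2181,192.0.2.2:2181,192.0.2.3:2181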

@@ -253,6 +253,15 @@
        tags: ceph,
        when: enable_ceph | bool }

- name: Apply role kafka
  gather_facts: false
  hosts: kafka
  serial: '{{ serial|default("0") }}'
  roles:
    - { role: kafka,
        tags: kafka,
        when: enable_kafka | bool }

- name: Apply role karbor
  gather_facts: false
  hosts: karbor

@@ -190,6 +190,7 @@ kolla_internal_vip_address: "10.10.10.254"
#enable_influxdb: "no"
#enable_ironic: "no"
#enable_ironic_pxe_uefi: "no"
#enable_kafka: "no"
#enable_karbor: "no"
#enable_kuryr: "no"
#enable_magnum: "no"

@@ -0,0 +1,5 @@
---
features:
  - Add a role for deploying Apache Kafka, a distributed streaming platform.
    See https://kafka.apache.org/ for more details. Requires Apache Zookeeper
    to be configured.
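
Operators wanting to tune the broker can do so without patching the role: the JVM heap comes from kafka_heap_opts (default "-Xmx1G -Xms1G" above) and can be overridden in /etc/kolla/globals.yml, while broker settings are merged from a custom kafka.server.properties placed under {{ node_custom_config }} (typically /etc/kolla/config). A minimal sketch, with illustrative values:

    # /etc/kolla/globals.yml
    kafka_heap_opts: "-Xmx4G -Xms4G"

    # /etc/kolla/config/kafka.server.properties -- settings here are merged
    # over the role's template, e.g.:
    log.retention.hours=72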