Adding CFME All-In-One Install and support.

- Disable EPEL after the Graphite install and after the Grafana install
- Add a README for cfme-all-in-one
- Allow the port to be changed for Graphite/Grafana
- Automate adding Graphite as a Grafana data source
- Remove Grafana API key usage from dashboards-generic and dashboards-openstack
- Wait for Grafana to be ready before adding the new data source

Change-Id: I97235d60032d60061790f99d6d811ecc9d6f4c36
parent aa8751cc3f, commit 59bf86c75c

ansible/README.cfme-allinone.md (new file, 45 lines)
@@ -0,0 +1,45 @@
# Setting up a CFME or ManageIQ VM for All-In-One Performance Monitoring

1. Deploy a ManageIQ/CFME appliance

2. Add an additional disk to host Graphite's Whisper database and mount it at /var/lib/carbon

3. Clone Browbeat

```
[root@manageiq ~]# git clone https://github.com/jtaleric/browbeat.git
[root@manageiq ~]# cd browbeat/ansible
```

4. Create an Ansible inventory file

```
[graphite]
localhost ansible_connection=local

[grafana]
localhost ansible_connection=local

[cfme-all-in-one]
localhost ansible_connection=local
```

5. Install Ansible

```
[root@manageiq ansible]# easy_install pip
[root@manageiq ansible]# yum install -y python-devel gcc-c++
[root@manageiq ansible]# pip install ansible
```

6. Set the installation variables in install/group_vars/all by modifying the following values

```
graphite_host: localhost
graphite_port: 9000
graphite_prefix: manageiq
grafana_host: localhost
grafana_port: 9001
```

7. Run the playbooks to install collectd, Graphite, and Grafana

```
[root@manageiq ansible]# ansible-playbook -i hosts install/graphite.yml
[root@manageiq ansible]# ansible-playbook -i hosts install/grafana.yml
[root@manageiq ansible]# ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme-all-in-one"
```

8. Upload the dashboards via Ansible

```
[root@manageiq ansible]# ansible-playbook -i hosts install/dashboards-generic.yml
```

9. Enjoy your now performance-monitored CFME/ManageIQ appliance and view the Grafana dashboards at http://<manageiq ip address>:9001/
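A quick sanity check after step 7 (a sketch, not part of the original README): confirm that collectd is collecting and that Graphite is receiving data. This assumes the unixsock plugin from the generated collectd config and the default ports used above.

```
# List a few values collectd is currently tracking (via the collectd unix socket)
[root@manageiq ~]# collectdctl listval | head

# Confirm carbon-cache, graphite-web, and grafana are listening
[root@manageiq ~]# ss -tlnp | grep -E ':2003|:9000|:9001'

# Spot-check that graphite-web returns metrics under the configured prefix
[root@manageiq ~]# curl -s "http://localhost:9000/metrics/find?query=manageiq.*"
```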
@@ -6,6 +6,7 @@
 #
 # ansible-playbook -i hosts install/collectd-generic.yml --tags="baremetal"
 # ansible-playbook -i hosts install/collectd-generic.yml --tags="guest"
+# ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme-all-in-one"
 # ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme-vmdb"
 # ansible-playbook -i hosts install/collectd-generic.yml --tags="cfme"
 # ansible-playbook -i hosts install/collectd-generic.yml --tags="graphite"
@@ -30,6 +31,15 @@
     - collectd-generic
   tags: guest
 
+# Cloud Forms All-In-One appliance with Graphite/Grafana
+- hosts: cfme-all-in-one
+  remote_user: root
+  vars:
+    config_type: cfme-all-in-one
+  roles:
+    - collectd-generic
+  tags: cfme-all-in-one
+
 # Cloud Forms Database appliances
 - hosts: cfme-vmdb
   remote_user: root
@@ -15,6 +15,8 @@
     process_list_name: Guest
   - template_name: cfme
     process_list_name: CFME
+  - template_name: cfmeallinone
+    process_list_name: CFME-All-In-One
  # - template_name: cfme
  #   process_list_name: CFME-Amazon
  # - template_name: cfme
@@ -55,6 +55,7 @@ collectd_compute: false
 ########################################
 # Graphite Server ip address (Collectd -> Graphite server)
 graphite_host: 1.1.1.1
+graphite_port: 80
 # Graphite prefix / Cloud name used both with graphite and grafana dashboards
 graphite_prefix: openstack
 # Graphite username and password for login on the dashboard
@@ -67,7 +68,8 @@ graphite_password: calvin
 # Grafana Server IP Address/Port (Can be hosted on the Graphite server)
 grafana_host: 1.1.1.1
 grafana_port: 3000
-grafana_api_key: (Your Grafana API Key)
+grafana_username: admin
+grafana_password: admin
 # Batch number of hosts per row for all-{cpu, memory, disk, network} openstack dashboards
 dashboards_batch: 20
 # For use with all-{cpu, memory, disk, network} openstack dashboards, uses the graphite prefix to create dashboards for specific openstack cloud
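For the CFME/ManageIQ all-in-one case, these are the group_vars the README above tells you to point back at the appliance itself. A sketch of the resulting values (taken from the README and the defaults in this hunk, not from this diff itself):

```
graphite_host: localhost
graphite_port: 9000
graphite_prefix: manageiq
grafana_host: localhost
grafana_port: 9001
grafana_username: admin
grafana_password: admin
```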
@@ -0,0 +1,415 @@
# Installed by Browbeat Ansible Installer
# Config type: {{config_type}}

# Interval default is 10s
Interval {{collectd_interval}}

# Hostname for this machine, if not defined, use gethostname(2) system call
Hostname "{{inventory_hostname}}"

# Allow collectd to log
LoadPlugin syslog

# Loaded Plugins:
LoadPlugin write_graphite
LoadPlugin apache
LoadPlugin cpu
LoadPlugin df
LoadPlugin disk
LoadPlugin interface
LoadPlugin irq
LoadPlugin load
LoadPlugin memory
LoadPlugin processes
LoadPlugin postgresql
LoadPlugin swap
LoadPlugin tail
LoadPlugin unixsock
LoadPlugin uptime

# Open unix domain socket for collectdctl
<Plugin unixsock>
  SocketFile "/var/run/collectd-unixsock"
  SocketGroup "collectd"
  SocketPerms "0770"
  DeleteSocket true
</Plugin>

# Graphite Host Configuration
<Plugin write_graphite>
  <Carbon>
    Host "{{graphite_host}}"
    Port "2003"
    Prefix "{{graphite_prefix}}."
    Protocol "tcp"
    LogSendErrors true
    StoreRates true
    AlwaysAppendDS false
    EscapeCharacter "_"
  </Carbon>
</Plugin>

<Plugin apache>
  <Instance "status">
    URL "http://127.0.0.1/mod_status?auto"
  </Instance>
</Plugin>

<Plugin df>
  ValuesPercentage true
</Plugin>

<Plugin disk>
  Disk "/^[hsv]d[a-z]+[0-9]?$/"
  IgnoreSelected false
</Plugin>

<Plugin postgresql>
  <Query miq_queue_by_state>
    Statement "select state,count(*) from miq_queue group by state"
    <Result>
      Type gauge
      InstancePrefix "state"
      InstancesFrom "state"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query miq_queue_by_zone>
    Statement "select zone,count(*) from miq_queue group by zone"
    <Result>
      Type gauge
      InstancePrefix "zone"
      InstancesFrom "zone"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query miq_queue_by_role>
    Statement "select role,count(*) from miq_queue group by role"
    <Result>
      Type gauge
      InstancePrefix "role"
      InstancesFrom "role"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query miq_queue_by_priority>
    Statement "select priority,count(*) from miq_queue group by priority"
    <Result>
      Type gauge
      InstancePrefix "priority"
      InstancesFrom "priority"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query miq_queue_by_name>
    Statement "select queue_name,count(*) from miq_queue group by queue_name"
    <Result>
      Type gauge
      InstancePrefix "queue"
      InstancesFrom "queue_name"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query miq_queue_by_class>
    Statement "select class_name,count(*) from miq_queue group by class_name"
    <Result>
      Type gauge
      InstancePrefix "class"
      InstancesFrom "class_name"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query miq_queue_by_method>
    Statement "select method_name,count(*) from miq_queue group by method_name"
    <Result>
      Type gauge
      InstancePrefix "method"
      InstancesFrom "method_name"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query miq_queue_qcm>
    Statement "select REPLACE(CONCAT(queue_name, '-',class_name,'-',method_name), 'ManageIQ::Providers::', '') as queue,count(*) from miq_queue group by queue_name,class_name,method_name"
    <Result>
      Type gauge
      InstancePrefix "all"
      InstancesFrom "queue"
      ValuesFrom "count"
    </Result>
  </Query>
  <Query row_stats>
    Statement "SELECT sum(n_tup_ins) AS inserts, sum(n_tup_upd) AS updates, sum(n_tup_del) AS deletes FROM pg_stat_user_tables"
    <Result>
      Type derive
      InstancePrefix "rows_inserted"
      ValuesFrom "inserts"
    </Result>
    <Result>
      Type derive
      InstancePrefix "rows_updated"
      ValuesFrom "updates"
    </Result>
    <Result>
      Type derive
      InstancePrefix "rows_deleted"
      ValuesFrom "deletes"
    </Result>
  </Query>
  <Database vmdb_production>
    Host "localhost"
    Port "5432"
    User "root"
    Password "smartvm"
    # Predefined:
    Query backends
    Query transactions
    Query queries
    Query query_plans
    Query table_states
    Query disk_io
    Query disk_usage
    # Defined in Query blocks:
    Query miq_queue_by_state
    Query miq_queue_by_zone
    Query miq_queue_by_role
    Query miq_queue_by_priority
    Query miq_queue_by_name
    Query miq_queue_by_class
    Query miq_queue_by_method
    Query miq_queue_qcm
    Query row_stats
  </Database>
</Plugin>

<Plugin processes>
  # Appliance processes
  ProcessMatch "evm_server.rb" ".*evm_server\.rb$"
  ProcessMatch "evm_watchdog.rb" "/bin/evm_watchdog.rb"
  Process "evm:dbsync:replicate"
  Process "appliance_console.rb"

  # Workers
  ProcessMatch "MiqEventHandler" ".*MiqEventHandler.*"
  ProcessMatch "MiqGenericWorker" "MiqGenericWorker.*"
  ProcessMatch "MiqPriorityWorker" "MiqPriorityWorker.*"
  ProcessMatch "MiqReportingWorker" "MiqReportingWorker.*"
  ProcessMatch "MiqScheduleWorker" "MiqScheduleWorker.*"
  ProcessMatch "MiqUiWorker" "MiqUiWorker.*"
  ProcessMatch "MiqWebServiceWorker" "MiqWebServiceWorker.*"
  ProcessMatch "MiqReplicationWorker" "MiqReplicationWorker.*"
  ProcessMatch "MiqEmsMetricsProcessorWorker" "MiqEmsMetricsProcessorWorker.*"
  ProcessMatch "MiqSmartProxyWorker" "MiqSmartProxyWorker.*"
  ProcessMatch "MiqVimBrokerWorker" "MiqVimBrokerWorker.*"
  ProcessMatch "MiqEmsRefreshCoreWorker" "MiqEmsRefreshCoreWorker.*"
  ProcessMatch "MiqVmdbStorageBridgeWorker" "MiqVmdbStorageBridgeWorker.*"

  # Provider Refresh Workers:
  ProcessMatch "Vmware::InfraManager::RefreshWorker" "Vmware::InfraManager::RefreshWorker.*"
  ProcessMatch "Redhat::InfraManager::RefreshWorker" "Redhat::InfraManager::RefreshWorker.*"
  ProcessMatch "Microsoft::InfraManager::RefreshWorker" "Microsoft::InfraManager::RefreshWorker.*"
  ProcessMatch "Openstack::InfraManager::RefreshWorker" "Openstack::InfraManager::RefreshWorker.*"
  ProcessMatch "MiqEmsRefreshWorkerVmware" "MiqEmsRefreshWorkerVmware.*"
  ProcessMatch "MiqEmsRefreshWorkerRedhat" "MiqEmsRefreshWorkerRedhat.*"
  ProcessMatch "MiqEmsRefreshWorkerMicrosoft" "MiqEmsRefreshWorkerMicrosoft.*"

  ProcessMatch "AnsibleTower::ConfigurationManager::RefreshWorker" "AnsibleTower::ConfigurationManager::RefreshWorker.*"
  ProcessMatch "Foreman::ConfigurationManager::RefreshWorker" "Foreman::ConfigurationManager::RefreshWorker.*"
  ProcessMatch "Foreman::ProvisioningManager::RefreshWorker" "Foreman::ProvisioningManager::RefreshWorker.*"
  ProcessMatch "MiqEmsRefreshWorkerForemanConfiguration" "MiqEmsRefreshWorkerForemanConfiguration.*"
  ProcessMatch "MiqEmsRefreshWorkerForemanProvisioning" "MiqEmsRefreshWorkerForemanProvisioning.*"

  ProcessMatch "Amazon::CloudManager::RefreshWorker" "Amazon::CloudManager::RefreshWorker.*"
  ProcessMatch "Azure::CloudManager::RefreshWorker" "Azure::CloudManager::RefreshWorker.*"
  ProcessMatch "Google::CloudManager::RefreshWorker" "Google::CloudManager::RefreshWorker.*"
  ProcessMatch "Openstack::CloudManager::RefreshWorker" "Openstack::CloudManager::RefreshWorker.*"
  ProcessMatch "MiqEmsRefreshWorkerAmazon" "MiqEmsRefreshWorkerAmazon.*"
  ProcessMatch "MiqEmsRefreshWorkerOpenstack" "MiqEmsRefreshWorkerOpenstack.*"

  ProcessMatch "Atomic::ContainerManager::RefreshWorker" "Atomic::ContainerManager::RefreshWorker.*"
  ProcessMatch "AtomicEnterprise::ContainerManager::RefreshWorker" "AtomicEnterprise::ContainerManager::RefreshWorker.*"
  ProcessMatch "Hawkular::MiddlewareManager::RefreshWorker" "Hawkular::MiddlewareManager::RefreshWorker.*"
  ProcessMatch "Kubernetes::ContainerManager::RefreshWorker" "Kubernetes::ContainerManager::RefreshWorker.*"
  ProcessMatch "Openshift::ContainerManager::RefreshWorker" "Openshift::ContainerManager::RefreshWorker.*"
  ProcessMatch "OpenshiftEnterprise::ContainerManager::RefreshWorker" "OpenshiftEnterprise::ContainerManager::RefreshWorker.*"
  ProcessMatch "MiqNetappRefreshWorker" "MiqNetappRefreshWorker.*"
  ProcessMatch "MiqSmisRefreshWorker" "MiqSmisRefreshWorker.*"

  # Provider EventCatcher Workers:
  ProcessMatch "Vmware::InfraManager::EventCatcher" "Vmware::InfraManager::EventCatcher.*"
  ProcessMatch "Redhat::InfraManager::EventCatcher" "Redhat::InfraManager::EventCatcher.*"
  ProcessMatch "Openstack::InfraManager::EventCatcher" "Openstack::InfraManager::EventCatcher.*"
  ProcessMatch "MiqEventCatcherVmware" "MiqEventCatcherVmware.*"
  ProcessMatch "MiqEventCatcherRedhat" "MiqEventCatcherRedhat.*"

  ProcessMatch "Amazon::CloudManager::EventCatcher" "Amazon::CloudManager::EventCatcher.*"
  ProcessMatch "Azure::CloudManager::EventCatcher" "Azure::CloudManager::EventCatcher.*"
  ProcessMatch "Openstack::CloudManager::EventCatcher" "Openstack::CloudManager::EventCatcher.*"
  ProcessMatch "MiqEventCatcherAmazon" "MiqEventCatcherAmazon.*"
  ProcessMatch "MiqEventCatcherOpenstack" "MiqEventCatcherOpenstack.*"

  ProcessMatch "Atomic::ContainerManager::EventCatcher" "Atomic::ContainerManager::EventCatcher.*"
  ProcessMatch "AtomicEnterprise::ContainerManager::EventCatcher" "AtomicEnterprise::ContainerManager::EventCatcher.*"
  ProcessMatch "Kubernetes::ContainerManager::EventCatcher" "Kubernetes::ContainerManager::EventCatcher.*"
  ProcessMatch "Openshift::ContainerManager::EventCatcher" "Openshift::ContainerManager::EventCatcher.*"
  ProcessMatch "OpenshiftEnterprise::ContainerManager::EventCatcher" "OpenshiftEnterprise::ContainerManager::EventCatcher.*"

  # Provider MetricsCollector Workers:
  ProcessMatch "Vmware::InfraManager::MetricsCollectorWorker" "Vmware::InfraManager::MetricsCollectorWorker.*"
  ProcessMatch "Redhat::InfraManager::MetricsCollectorWorker" "Redhat::InfraManager::MetricsCollectorWorker.*"
  ProcessMatch "Openstack::InfraManager::MetricsCollectorWorker" "Openstack::InfraManager::MetricsCollectorWorker.*"
  ProcessMatch "MiqEmsMetricsCollectorWorkerVmware" "MiqEmsMetricsCollectorWorkerVmware.*"
  ProcessMatch "MiqEmsMetricsCollectorWorkerRedhat" "MiqEmsMetricsCollectorWorkerRedhat.*"

  ProcessMatch "Amazon::CloudManager::MetricsCollectorWorker" "Amazon::CloudManager::MetricsCollectorWorker.*"
  ProcessMatch "Openstack::CloudManager::MetricsCollectorWorker" "Openstack::CloudManager::MetricsCollectorWorker.*"
  ProcessMatch "MiqEmsMetricsCollectorWorkerAmazon" "MiqEmsMetricsCollectorWorkerAmazon.*"
  ProcessMatch "MiqEmsMetricsCollectorWorkerOpenstack" "MiqEmsMetricsCollectorWorkerOpenstack.*"

  ProcessMatch "Atomic::ContainerManager::MetricsCollectorWorker" "Atomic::ContainerManager::MetricsCollectorWorker.*"
  ProcessMatch "AtomicEnterprise::ContainerManager::MetricsCollectorWorker" "AtomicEnterprise::ContainerManager::MetricsCollectorWorker.*"
  ProcessMatch "Kubernetes::ContainerManager::MetricsCollectorWorker" "Kubernetes::ContainerManager::MetricsCollectorWorker.*"
  ProcessMatch "Openshift::ContainerManager::MetricsCollectorWorker" "Openshift::ContainerManager::MetricsCollectorWorker.*"
  ProcessMatch "OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker" "OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker.*"

  ProcessMatch "MiqStorageMetricsCollectorWorker" "MiqStorageMetricsCollectorWorker.*"

  ProcessMatch "carbon-cache" "python.+carbon-cache"
  Process "grafana-server"
  ProcessMatch "collectd" "/usr/sbin/collectd.+-C.+/etc/collectd.conf"
  Process "httpd"
  Process "memcached"
  ProcessMatch "postgres" "postgres.*"
  # Catch all-other ruby processes:
  Process "ruby"
</Plugin>

<Plugin swap>
  ReportBytes true
  ValuesPercentage true
</Plugin>

<Plugin "tail">
  <File "/var/www/miq/vmdb/log/api.log">
    Instance "api"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/appliance_console.log">
    Instance "appliance_console"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/audit.log">
    Instance "audit"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/automation.log">
    Instance "automation"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/aws.log">
    Instance "aws"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/evm.log">
    Instance "evm"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/fog.log">
    Instance "fog"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/kubernetes.log">
    Instance "kubernetes"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/policy.log">
    Instance "policy"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/production.log">
    Instance "production"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/rhevm.log">
    Instance "rhevm"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/scvmm.log">
    Instance "scvmm"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
  <File "/var/www/miq/vmdb/log/vim.log">
    Instance "vim"
    <Match>
      Regex " ERROR "
      DSType "CounterInc"
      Type "counter"
      Instance "total"
    </Match>
  </File>
</Plugin>

# Include other collectd configuration files
Include "/etc/collectd.d"
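With this template in place, metrics should land in Graphite under `{{graphite_prefix}}.<hostname>.<plugin>...` (for instance under `manageiq.<hostname>.postgresql-vmdb_production....`; exact leaf names depend on collectd's types.db, so treat the paths as illustrative). Two hedged spot checks on the appliance, assuming the values shown above (local PostgreSQL on 5432, database vmdb_production, user root, password smartvm) and that psql is on PATH:

```
# Run one of the configured queries by hand to verify credentials and data
[root@manageiq ~]# PGPASSWORD=smartvm psql -h localhost -p 5432 -U root -d vmdb_production \
    -c "select state,count(*) from miq_queue group by state"

# Confirm collectd registered postgresql and per-process metrics
[root@manageiq ~]# collectdctl listval | grep -E 'postgresql|processes-' | head
```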
@@ -12,6 +12,10 @@ collectd_packages:
   cfme:
     - collectd
     - collectd-apache
+  cfme-all-in-one:
+    - collectd
+    - collectd-postgresql
+    - collectd-apache
   cfme-vmdb:
     - collectd
     - collectd-postgresql
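If the cfme-all-in-one collectd run fails, a quick way to check that the packages listed above actually landed and that the daemon is up (a sketch; assumes systemd on the appliance):

```
[root@manageiq ~]# rpm -q collectd collectd-postgresql collectd-apache
[root@manageiq ~]# systemctl status collectd --no-pager
[root@manageiq ~]# grep collectd /var/log/messages | tail -n 20
```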
@@ -4,7 +4,7 @@
 #
 
 - name: Remove existing dashboards
-  command: "curl -X DELETE -H 'Authorization: Bearer {{grafana_api_key}}' -H 'Content-Type: application/json' http://{{grafana_host}}:{{grafana_port}}/api/dashboards/db/{{item.process_list_name|lower}}-general-system-performance"
+  command: "curl -X DELETE -H 'Content-Type: application/json' http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db/{{item.process_list_name|lower}}-general-system-performance"
   when: overwrite_existing
   with_items: dashboards
 
@@ -18,7 +18,7 @@
   with_items: dashboards
 
 - name: Upload dashboards to grafana
-  command: "curl -X POST -H 'Authorization: Bearer {{grafana_api_key}}' -H 'Content-Type: application/json' -d @{{role_path}}/files/{{item.process_list_name}}_general_system_performance.json http://{{grafana_host}}:{{grafana_port}}/api/dashboards/db"
+  command: "curl -X POST -H 'Content-Type: application/json' -d @{{role_path}}/files/{{item.process_list_name}}_general_system_performance.json http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db"
   with_items: dashboards
 
 - name: Remove leftover json file(s)
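These tasks now authenticate with HTTP basic auth embedded in the URL rather than a pre-provisioned API key, which lets the playbook run unattended right after the Grafana install. A quick way to confirm the credentials before running it (a sketch; admin/admin and port 9001 are only the defaults from group_vars/all and the README above):

```
[root@manageiq ansible]# curl -s -o /dev/null -w "%{http_code}\n" \
    http://admin:admin@localhost:9001/api/org
```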
(File diff suppressed because it is too large.)
@@ -2531,7 +2531,8 @@
       "editable": true,
       "type": "graph",
       "isNew": true,
-      "id": 59,
+      {% if vars.update({'panel_idx': (vars.panel_idx + 1)}) %} {% endif %}
+      "id": {{vars.panel_idx}},
       "datasource": null,
       "renderer": "flot",
       "x-axis": true,
@@ -2605,7 +2606,8 @@
       "editable": true,
       "type": "graph",
       "isNew": true,
-      "id": 60,
+      {% if vars.update({'panel_idx': (vars.panel_idx + 1)}) %} {% endif %}
+      "id": {{vars.panel_idx}},
       "datasource": null,
       "renderer": "flot",
       "x-axis": true,
@@ -2671,7 +2673,8 @@
       "editable": true,
       "type": "graph",
       "isNew": true,
-      "id": 63,
+      {% if vars.update({'panel_idx': (vars.panel_idx + 1)}) %} {% endif %}
+      "id": {{vars.panel_idx}},
       "datasource": null,
       "renderer": "flot",
      "x-axis": true,
@@ -2737,7 +2740,8 @@
       "editable": true,
       "type": "graph",
       "isNew": true,
-      "id": 61,
+      {% if vars.update({'panel_idx': (vars.panel_idx + 1)}) %} {% endif %}
+      "id": {{vars.panel_idx}},
       "datasource": null,
       "renderer": "flot",
       "x-axis": true,
@@ -2803,7 +2807,8 @@
       "editable": true,
       "type": "graph",
       "isNew": true,
-      "id": 62,
+      {% if vars.update({'panel_idx': (vars.panel_idx + 1)}) %} {% endif %}
+      "id": {{vars.panel_idx}},
       "datasource": null,
       "renderer": "flot",
       "x-axis": true,
@@ -2873,7 +2878,8 @@
       "editable": true,
       "type": "graph",
       "isNew": true,
-      "id": 64,
+      {% if vars.update({'panel_idx': (vars.panel_idx + 1)}) %} {% endif %}
+      "id": {{vars.panel_idx}},
       "datasource": null,
       "renderer": "flot",
       "x-axis": true,
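The hard-coded panel ids above are replaced by a running counter so the template can emit any number of panels without id collisions. Because Jinja2 cannot rebind a variable defined outside the current block, the template mutates a dict in place via `vars.update(...)` inside an `{% if %}` that renders nothing (dict.update returns None, which is falsy). A minimal sketch of the pattern; the initialization line is assumed to live elsewhere in the template, outside these hunks:

```
{% set vars = {'panel_idx': 0} %}
...
{% if vars.update({'panel_idx': (vars.panel_idx + 1)}) %} {% endif %}
"id": {{vars.panel_idx}},
```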
@@ -260,6 +260,209 @@ per_process_panels:
    - name: "MiqStorageMetricsCollectorWorker"
      processes:
        - "MiqStorageMetricsCollectorWorker"
  CFME-All-In-One:
    - name: "Summerized"
      processes:
        - ruby
        - postgres
        - httpd
    - name: "Memcached"
      processes:
        - memcached
    - name: "Carbon"
      processes:
        - carbon-cache
    - name: "Grafana"
      processes:
        - grafana-server
    - name: "Collectd"
      processes:
        - collectd
    - name: "Appliance"
      processes:
        - evm_server.rb
        - evm_watchdog.rb
        - appliance_console.rb
    - name: "MiqGenericWorker"
      processes:
        - MiqGenericWorker
    - name: "MiqPriorityWorker"
      processes:
        - MiqPriorityWorker
    - name: "MiqScheduleWorker"
      processes:
        - MiqScheduleWorker
    - name: "MiqUiWorker"
      processes:
        - MiqUiWorker
    - name: "MiqWebServiceWorker"
      processes:
        - MiqWebServiceWorker
    - name: "MiqReportingWorker"
      processes:
        - MiqReportingWorker
    - name: "MiqEventHandler"
      processes:
        - MiqEventHandler
    - name: "MiqSmartProxyWorker"
      processes:
        - MiqSmartProxyWorker
    - name: "MiqReplicationWorker"
      processes:
        - MiqReplicationWorker
        - "evm:dbsync:replicate"
    - name: "MiqEmsMetricsProcessorWorker"
      processes:
        - MiqEmsMetricsProcessorWorker
    # VMware Additional Workers:
    - name: "MiqVimBrokerWorker"
      processes:
        - MiqVimBrokerWorker
    - name: "MiqEmsRefreshCoreWorker"
      processes:
        - MiqEmsRefreshCoreWorker
    - name: "MiqVmdbStorageBridgeWorker"
      processes:
        - MiqVmdbStorageBridgeWorker
    # Refresh Workers:
    - name: "Vmware::InfraManager::RefreshWorker"
      processes:
        - "Vmware::InfraManager::RefreshWorker"
        - "MiqEmsRefreshWorkerVmware"
    - name: "Redhat::InfraManager::RefreshWorker"
      processes:
        - "Redhat::InfraManager::RefreshWorker"
        - "MiqEmsRefreshWorkerRedhat"
    - name: "Microsoft::InfraManager::RefreshWorker"
      processes:
        - "Microsoft::InfraManager::RefreshWorker"
        - "MiqEmsRefreshWorkerMicrosoft"
    - name: "Openstack::InfraManager::RefreshWorker"
      processes:
        - "Openstack::InfraManager::RefreshWorker"
    - name: "AnsibleTower::ConfigurationManager::RefreshWorker"
      processes:
        - "AnsibleTower::ConfigurationManager::RefreshWorker"
    - name: "Foreman RefreshWorkers"
      processes:
        - "Foreman::ConfigurationManager::RefreshWorker"
        - "MiqEmsRefreshWorkerForemanConfiguration"
        - "Foreman::ProvisioningManager::RefreshWorker"
        - "MiqEmsRefreshWorkerForemanProvisioning"
    - name: "Amazon::CloudManager::RefreshWorker"
      processes:
        - "Amazon::CloudManager::RefreshWorker"
        - "MiqEmsRefreshWorkerAmazon"
    - name: "Azure::CloudManager::RefreshWorker"
      processes:
        - "Azure::CloudManager::RefreshWorker"
    - name: "Google::CloudManager::RefreshWorker"
      processes:
        - "Google::CloudManager::RefreshWorker"
    - name: "Openstack::CloudManager::RefreshWorker"
      processes:
        - "Openstack::CloudManager::RefreshWorker"
        - "MiqEmsRefreshWorkerOpenstack"
    - name: "Atomic::ContainerManager::RefreshWorker"
      processes:
        - "Atomic::ContainerManager::RefreshWorker"
    - name: "AtomicEnterprise::ContainerManager::RefreshWorker"
      processes:
        - "AtomicEnterprise::ContainerManager::RefreshWorker"
    - name: "Hawkular::MiddlewareManager::RefreshWorker"
      processes:
        - "Hawkular::MiddlewareManager::RefreshWorker"
    - name: "Kubernetes::ContainerManager::RefreshWorker"
      processes:
        - "Kubernetes::ContainerManager::RefreshWorker"
    - name: "Openshift::ContainerManager::RefreshWorker"
      processes:
        - "Openshift::ContainerManager::RefreshWorker"
    - name: "OpenshiftEnterprise::ContainerManager::RefreshWorker"
      processes:
        - "OpenshiftEnterprise::ContainerManager::RefreshWorker"
    - name: "MiqNetappRefreshWorker"
      processes:
        - "MiqNetappRefreshWorker"
    - name: "MiqSmisRefreshWorker"
      processes:
        - "MiqSmisRefreshWorker"
    # Event Catchers:
    - name: "Vmware::InfraManager::EventCatcher"
      processes:
        - "Vmware::InfraManager::EventCatcher"
        - "MiqEventCatcherVmware"
    - name: "Redhat::InfraManager::EventCatcher"
      processes:
        - "Redhat::InfraManager::EventCatcher"
        - "MiqEventCatcherRedhat"
    - name: "Openstack::InfraManager::EventCatcher"
      processes:
        - "Openstack::InfraManager::EventCatcher"
    - name: "Amazon::CloudManager::EventCatcher"
      processes:
        - "Amazon::CloudManager::EventCatcher"
        - "MiqEventCatcherAmazon"
    - name: "Azure::CloudManager::EventCatcher"
      processes:
        - "Azure::CloudManager::EventCatcher"
    - name: "Openstack::CloudManager::EventCatcher"
      processes:
        - "Openstack::CloudManager::EventCatcher"
        - "MiqEventCatcherOpenstack"
    - name: "Atomic::ContainerManager::EventCatcher"
      processes:
        - "Atomic::ContainerManager::EventCatcher"
    - name: "AtomicEnterprise::ContainerManager::EventCatcher"
      processes:
        - "AtomicEnterprise::ContainerManager::EventCatcher"
    - name: "Kubernetes::ContainerManager::EventCatcher"
      processes:
        - "Kubernetes::ContainerManager::EventCatcher"
    - name: "Openshift::ContainerManager::EventCatcher"
      processes:
        - "Openshift::ContainerManager::EventCatcher"
    - name: "OpenshiftEnterprise::ContainerManager::EventCatcher"
      processes:
        - "OpenshiftEnterprise::ContainerManager::EventCatcher"
    # Metrics Collector Workers
    - name: "Vmware::InfraManager::MetricsCollectorWorker"
      processes:
        - "Vmware::InfraManager::MetricsCollectorWorker"
        - "MiqMetricsCollectorWorkerVmware"
    - name: "Redhat::InfraManager::MetricsCollectorWorker"
      processes:
        - "Redhat::InfraManager::MetricsCollectorWorker"
        - "MiqMetricsCollectorWorkerRedhat"
    - name: "Openstack::InfraManager::MetricsCollectorWorker"
      processes:
        - "Openstack::InfraManager::MetricsCollectorWorker"
    - name: "Amazon::CloudManager::MetricsCollectorWorker"
      processes:
        - "Amazon::CloudManager::MetricsCollectorWorker"
        - "MiqMetricsCollectorWorkerAmazon"
    - name: "Openstack::CloudManager::MetricsCollectorWorker"
      processes:
        - "Openstack::CloudManager::MetricsCollectorWorker"
        - "MiqMetricsCollectorWorkerOpenstack"
    - name: "Atomic::ContainerManager::MetricsCollectorWorker"
      processes:
        - "Atomic::ContainerManager::MetricsCollectorWorker"
    - name: "AtomicEnterprise::ContainerManager::MetricsCollectorWorker"
      processes:
        - "AtomicEnterprise::ContainerManager::MetricsCollectorWorker"
    - name: "Kubernetes::ContainerManager::MetricsCollectorWorker"
      processes:
        - "Kubernetes::ContainerManager::MetricsCollectorWorker"
    - name: "Openshift::ContainerManager::MetricsCollectorWorker"
      processes:
        - "Openshift::ContainerManager::MetricsCollectorWorker"
    - name: "OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker"
      processes:
        - "OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker"
    - name: "MiqStorageMetricsCollectorWorker"
      processes:
        - "MiqStorageMetricsCollectorWorker"
  CFME-Amazon:
    - name: "Summerized"
      processes:
@@ -14,7 +14,7 @@
     - all_network_graphs
 
 - name: Remove Existing Dashboards
-  command: "curl -X DELETE -H 'Authorization: Bearer {{grafana_api_key}}' -H 'Content-Type: application/json' http://{{grafana_host}}:{{grafana_port}}/api/dashboards/db/{{item}}"
+  command: "curl -X DELETE -H 'Content-Type: application/json' http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db/{{item}}"
   when: overwrite_existing
   with_items:
     - "{{dashboard_cloud_name}}-all-nodes-cpu"
@@ -25,7 +25,7 @@
     - cloud-system-performance-comparsion
 
 - name: Upload Dashboards to Grafana
-  command: "curl -X POST -H 'Authorization: Bearer {{grafana_api_key}}' -H 'Content-Type: application/json' -d @{{item}} http://{{grafana_host}}:{{grafana_port}}/api/dashboards/db"
+  command: "curl -X POST -H 'Content-Type: application/json' -d @{{item}} http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/dashboards/db"
   with_items:
     - "{{role_path}}/files/all_cpu_graphs.json"
     - "{{role_path}}/files/all_memory_graphs.json"
@@ -19,6 +19,14 @@
   with_items:
    - https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm

- name: Set grafana server port
  ini_file:
    dest=/etc/grafana/grafana.ini
    section=server
    option=http_port
    value={{grafana_port}}
  become: true

# disable firewalld (might need to create specific firewall rules or leave it to admin to do via iptables)

- name: disable firewalld
@@ -28,8 +36,35 @@
#
# setup the grafana-server service
#

- name: Setup grafana-server service
  service: name=grafana-server state=started enabled=true
  become: true

- name: Wait for grafana to be ready
  wait_for: host={{grafana_host}} port={{grafana_port}} delay=5 timeout=30

#
# Add graphite server as a default datasource
#
- name: Ensure {{role_path}}/files directory exists
  file: path={{role_path}}/files state=directory
  connection: local

- name: Create data_source.json
  template:
    src: data_source.json.j2
    dest: "{{role_path}}/files/data_source.json"
  connection: local

- name: Create Data Source on grafana server
  command: "curl -X POST -H 'Content-Type: application/json' -d @{{role_path}}/files/data_source.json http://{{grafana_username}}:{{grafana_password}}@{{grafana_host}}:{{grafana_port}}/api/datasources"
  connection: local

- name: Remove leftover json file
  file: path={{role_path}}/files/data_source.json state=absent
  connection: local

- name: Disable EPEL
  shell: rpm -e epel-release
  ignore_errors: true
  become: true
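Once install/grafana.yml finishes, the data source created by the tasks above should be visible through the Grafana HTTP API. A hedged check (admin/admin and port 9001 are only the defaults from this change's README and group_vars):

```
# Should return a JSON array that includes the "graphite" data source defined below
[root@manageiq ansible]# curl -s http://admin:admin@localhost:9001/api/datasources | python -m json.tool
```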
ansible/install/roles/grafana/templates/data_source.json.j2 (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "name":"graphite",
  "type":"graphite",
  "url":"http://{{graphite_host}}:{{graphite_port}}/",
  "access":"proxy",
  "isDefault":true,
  "basicAuth":true,
  "basicAuthUser":"{{graphite_username}}",
  "basicAuthPassword":"{{graphite_password}}"
}
@@ -75,7 +75,7 @@ MAX_UPDATES_PER_SECOND = 2000
 # database files to all get created and thus longer until the data becomes usable.
 # Setting this value high (like "inf" for infinity) will cause graphite to create
 # the files quickly but at the risk of slowing I/O down considerably for a while.
-MAX_CREATES_PER_MINUTE = 200
+MAX_CREATES_PER_MINUTE = 2000
 
 LINE_RECEIVER_INTERFACE = 0.0.0.0
 LINE_RECEIVER_PORT = 2003
@@ -44,9 +44,9 @@
   when: graphite_db_installed.rc != 0
   register: apache_needs_restart
 
-- name: copy httpd graphite-web config
-  copy:
-    src=graphite-web.conf
+- name: Setup httpd graphite-web config
+  template:
+    src=graphite-web.conf.j2
     dest=/etc/httpd/conf.d/graphite-web.conf
     owner=root
     group=root
@@ -120,3 +120,8 @@
   service: name=carbon-cache state=restarted enabled=true
   become: true
   when: carbon_cache_needs_restart.changed
+
+- name: Disable EPEL
+  shell: rpm -e epel-release
+  ignore_errors: true
+  become: true
@@ -1,6 +1,8 @@
 # Graphite Web Basic mod_wsgi vhost
 
-<VirtualHost *:80>
+{% if graphite_port != 80 %}
+Listen {{graphite_port}}
+{% endif %}
+<VirtualHost *:{{graphite_port}}>
   DocumentRoot "/usr/share/graphite/webapp"
   ErrorLog /var/log/httpd/graphite-web-error.log
   CustomLog /var/log/httpd/graphite-web-access.log common
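With graphite_port set to 9000 as in the README, the template above would render roughly as follows (a sketch of the expected output on the appliance, not a file in this commit; only the portion shown in the hunk is included):

```
# /etc/httpd/conf.d/graphite-web.conf (rendered)
Listen 9000
<VirtualHost *:9000>
  DocumentRoot "/usr/share/graphite/webapp"
  ErrorLog /var/log/httpd/graphite-web-error.log
  CustomLog /var/log/httpd/graphite-web-access.log common
  ...
```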