Merge "Address bandit gate failures"

This commit is contained in:
Zuul 2020-02-18 17:35:45 +00:00 committed by Gerrit Code Review
commit 75474c01b8
8 changed files with 39 additions and 40 deletions

View File

@ -1,6 +1,6 @@
#!/usr/bin/python #!/usr/bin/python
import subprocess import subprocess # nosec
import json import json
import sys import sys
from argparse import * from argparse import *
@ -60,7 +60,7 @@ class cephCRUSH():
if 'all' in poolName or 'All' in poolName: if 'all' in poolName or 'All' in poolName:
try: try:
poolLs = 'ceph osd pool ls -f json-pretty' poolLs = 'ceph osd pool ls -f json-pretty'
poolstr = subprocess.check_output(poolLs, shell=True) poolstr = subprocess.check_output(poolLs, shell=True) # nosec
self.listPoolName = json.loads(poolstr) self.listPoolName = json.loads(poolstr)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
print('{}'.format(e)) print('{}'.format(e))
@ -72,7 +72,7 @@ class cephCRUSH():
try: try:
"""Retrieve the crush hierarchies""" """Retrieve the crush hierarchies"""
crushTree = "ceph osd crush tree -f json-pretty | jq .nodes" crushTree = "ceph osd crush tree -f json-pretty | jq .nodes"
chstr = subprocess.check_output(crushTree, shell=True) chstr = subprocess.check_output(crushTree, shell=True) # nosec
self.crushHierarchy = json.loads(chstr) self.crushHierarchy = json.loads(chstr)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
print('{}'.format(e)) print('{}'.format(e))
@ -107,8 +107,8 @@ class cephCRUSH():
self.poolSize = 0 self.poolSize = 0
def isNautilus(self): def isNautilus(self):
grepResult = int(subprocess.check_output('ceph mon versions | egrep -q "nautilus" | echo $?', shell=True)) grepResult = int(subprocess.check_output('ceph mon versions | egrep -q "nautilus" | echo $?', shell=True)) # nosec
return True if grepResult == 0 else False return grepResult == 0
def getPoolSize(self, poolName): def getPoolSize(self, poolName):
""" """
@ -119,7 +119,7 @@ class cephCRUSH():
"""Get the size attribute of the poolName""" """Get the size attribute of the poolName"""
try: try:
poolGet = 'ceph osd pool get ' + poolName + ' size -f json-pretty' poolGet = 'ceph osd pool get ' + poolName + ' size -f json-pretty'
szstr = subprocess.check_output(poolGet, shell=True) szstr = subprocess.check_output(poolGet, shell=True) # nosec
pSize = json.loads(szstr) pSize = json.loads(szstr)
self.poolSize = pSize['size'] self.poolSize = pSize['size']
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
@ -130,7 +130,7 @@ class cephCRUSH():
def checkPGs(self, poolName): def checkPGs(self, poolName):
poolPGs = self.poolPGs['pg_stats'] if self.isNautilus() else self.poolPGs poolPGs = self.poolPGs['pg_stats'] if self.isNautilus() else self.poolPGs
if len(poolPGs) == 0: if not poolPGs:
return return
print('Checking PGs in pool {} ...'.format(poolName)), print('Checking PGs in pool {} ...'.format(poolName)),
badPGs = False badPGs = False
@ -160,7 +160,8 @@ class cephCRUSH():
"""traverse up (to the root) one level""" """traverse up (to the root) one level"""
traverseID = self.crushFD[traverseID]['id'] traverseID = self.crushFD[traverseID]['id']
traverseLevel += 1 traverseLevel += 1
assert (traverseLevel == self.osd_depth), "OSD depth mismatch" if not (traverseLevel == self.osd_depth):
raise Exception("OSD depth mismatch")
""" """
check_FD should have check_FD should have
{ {
@ -214,12 +215,13 @@ class cephCRUSH():
elif self.poolSize == 0: elif self.poolSize == 0:
print('Pool {} was not found.'.format(pool)) print('Pool {} was not found.'.format(pool))
continue continue
assert (self.poolSize > 1), "Pool size was incorrectly set" if not self.poolSize > 1:
raise Exception("Pool size was incorrectly set")
try: try:
"""Get the list of PGs in the pool""" """Get the list of PGs in the pool"""
lsByPool = 'ceph pg ls-by-pool ' + pool + ' -f json-pretty' lsByPool = 'ceph pg ls-by-pool ' + pool + ' -f json-pretty'
pgstr = subprocess.check_output(lsByPool, shell=True) pgstr = subprocess.check_output(lsByPool, shell=True) # nosec
self.poolPGs = json.loads(pgstr) self.poolPGs = json.loads(pgstr)
"""Check that OSDs in the PG are in separate failure domains""" """Check that OSDs in the PG are in separate failure domains"""
self.checkPGs(pool) self.checkPGs(pool)

View File

@ -1,7 +1,7 @@
#!/usr/bin/python #!/usr/bin/python
import re import re
import os import os
import subprocess import subprocess # nosec
import json import json
MON_REGEX = r"^\d: ([0-9\.]*):\d+/\d* mon.([^ ]*)$" MON_REGEX = r"^\d: ([0-9\.]*):\d+/\d* mon.([^ ]*)$"
@ -15,7 +15,7 @@ monmap_command = "ceph --cluster=${NAMESPACE} mon getmap > /tmp/monmap && monmap
def extract_mons_from_monmap(): def extract_mons_from_monmap():
monmap = subprocess.check_output(monmap_command, shell=True) monmap = subprocess.check_output(monmap_command, shell=True) # nosec
mons = {} mons = {}
for line in monmap.split("\n"): for line in monmap.split("\n"):
m = re.match(MON_REGEX, line) m = re.match(MON_REGEX, line)
@ -24,7 +24,7 @@ def extract_mons_from_monmap():
return mons return mons
def extract_mons_from_kubeapi(): def extract_mons_from_kubeapi():
kubemap = subprocess.check_output(kubectl_command, shell=True) kubemap = subprocess.check_output(kubectl_command, shell=True) # nosec
return json.loads(kubemap) return json.loads(kubemap)
current_mons = extract_mons_from_monmap() current_mons = extract_mons_from_monmap()
@ -37,11 +37,11 @@ removed_mon = False
for mon in current_mons: for mon in current_mons:
if not mon in expected_mons: if not mon in expected_mons:
print("removing zombie mon %s" % mon) print("removing zombie mon %s" % mon)
subprocess.call(["ceph", "--cluster", os.environ["NAMESPACE"], "mon", "remove", mon]) subprocess.call(["ceph", "--cluster", os.environ["NAMESPACE"], "mon", "remove", mon]) # nosec
removed_mon = True removed_mon = True
elif current_mons[mon] != expected_mons[mon]: # check if for some reason the ip of the mon changed elif current_mons[mon] != expected_mons[mon]: # check if for some reason the ip of the mon changed
print("ip change detected for pod %s" % mon) print("ip change detected for pod %s" % mon)
subprocess.call(["kubectl", "--namespace", os.environ["NAMESPACE"], "delete", "pod", mon]) subprocess.call(["kubectl", "--namespace", os.environ["NAMESPACE"], "delete", "pod", mon]) # nosec
removed_mon = True removed_mon = True
print("deleted mon %s via the kubernetes api" % mon) print("deleted mon %s via the kubernetes api" % mon)

View File

@ -1,6 +1,6 @@
#!/usr/bin/python #!/usr/bin/python
import subprocess import subprocess # nosec
import json import json
import sys import sys
import collections import collections
@ -11,7 +11,7 @@ if (int(len(sys.argv)) == 1):
else: else:
poolName = sys.argv[1] poolName = sys.argv[1]
cmdRep = 'ceph osd map' + ' ' + str(poolName) + ' ' + 'testreplication -f json-pretty' cmdRep = 'ceph osd map' + ' ' + str(poolName) + ' ' + 'testreplication -f json-pretty'
objectRep = subprocess.check_output(cmdRep, shell=True) objectRep = subprocess.check_output(cmdRep, shell=True) # nosec
repOut = json.loads(objectRep) repOut = json.loads(objectRep)
osdNumbers = repOut['up'] osdNumbers = repOut['up']
print("Test object got replicated on these osds: %s" % str(osdNumbers)) print("Test object got replicated on these osds: %s" % str(osdNumbers))
@ -19,7 +19,7 @@ else:
osdHosts= [] osdHosts= []
for osd in osdNumbers: for osd in osdNumbers:
cmdFind = 'ceph osd find' + ' ' + str(osd) cmdFind = 'ceph osd find' + ' ' + str(osd)
osdFind = subprocess.check_output(cmdFind , shell=True) osdFind = subprocess.check_output(cmdFind , shell=True) # nosec
osdHost = json.loads(osdFind) osdHost = json.loads(osdFind)
osdHostLocation = osdHost['crush_location'] osdHostLocation = osdHost['crush_location']
osdHosts.append(osdHostLocation['host']) osdHosts.append(osdHostLocation['host'])

View File

@ -21,7 +21,7 @@ import logging
import os import os
import select import select
import signal import signal
import subprocess import subprocess # nosec
import socket import socket
import sys import sys
import tempfile import tempfile
@ -142,7 +142,7 @@ def run_cmd_with_logging(popenargs,
stderr_log_level=logging.INFO, stderr_log_level=logging.INFO,
**kwargs): **kwargs):
"""Run subprocesses and stream output to logger.""" """Run subprocesses and stream output to logger."""
child = subprocess.Popen( child = subprocess.Popen( # nosec
popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
log_level = { log_level = {
child.stdout: stdout_log_level, child.stdout: stdout_log_level,
@ -266,7 +266,7 @@ def mysqld_bootstrap():
], logger) ], logger)
if not mysql_dbaudit_username: if not mysql_dbaudit_username:
template = ( template = (
"DELETE FROM mysql.user ;\n" "DELETE FROM mysql.user ;\n" # nosec
"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \'{1}\' ;\n" "CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \'{1}\' ;\n"
"GRANT ALL ON *.* TO '{0}'@'%' WITH GRANT OPTION ;\n" "GRANT ALL ON *.* TO '{0}'@'%' WITH GRANT OPTION ;\n"
"DROP DATABASE IF EXISTS test ;\n" "DROP DATABASE IF EXISTS test ;\n"
@ -277,7 +277,7 @@ def mysqld_bootstrap():
mysql_dbsst_username, mysql_dbsst_password)) mysql_dbsst_username, mysql_dbsst_password))
else: else:
template = ( template = (
"DELETE FROM mysql.user ;\n" "DELETE FROM mysql.user ;\n" # nosec
"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \'{1}\' ;\n" "CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \'{1}\' ;\n"
"GRANT ALL ON *.* TO '{0}'@'%' WITH GRANT OPTION ;\n" "GRANT ALL ON *.* TO '{0}'@'%' WITH GRANT OPTION ;\n"
"DROP DATABASE IF EXISTS test ;\n" "DROP DATABASE IF EXISTS test ;\n"
@ -537,7 +537,7 @@ def update_grastate_on_restart():
def recover_wsrep_position(): def recover_wsrep_position():
"""Extract recoved wsrep position from uncleanly exited node.""" """Extract recoved wsrep position from uncleanly exited node."""
wsrep_recover = subprocess.Popen( wsrep_recover = subprocess.Popen( # nosec
[ [
'mysqld', '--bind-address=127.0.0.1', 'mysqld', '--bind-address=127.0.0.1',
'--wsrep_cluster_address=gcomm://', '--wsrep-recover' '--wsrep_cluster_address=gcomm://', '--wsrep-recover'

View File

@ -59,7 +59,7 @@ def click_link_by_name(link_name):
browser.quit() browser.quit()
sys.exit(1) sys.exit(1)
def take_screenshot(page_name, artifacts_dir='/tmp/artifacts/'): def take_screenshot(page_name, artifacts_dir='/tmp/artifacts/'): # nosec
file_name = page_name.replace(' ', '_') file_name = page_name.replace(' ', '_')
try: try:
el = WebDriverWait(browser, 15) el = WebDriverWait(browser, 15)
@ -130,7 +130,7 @@ except TimeoutException:
sys.exit(1) sys.exit(1)
logger.info("The following screenshots were captured:") logger.info("The following screenshots were captured:")
for root, dirs, files in os.walk("/tmp/artifacts/"): for root, dirs, files in os.walk("/tmp/artifacts/"): # nosec
for name in files: for name in files:
logger.info(os.path.join(root, name)) logger.info(os.path.join(root, name))

View File

@ -12,7 +12,7 @@ class RedisTest(object):
def test_connection(self): def test_connection(self):
ping = self.redis_conn.ping() ping = self.redis_conn.ping()
assert ping, "No connection to database" if not ping: raise Exception('No connection to database')
print("Successfully connected to database") print("Successfully connected to database")
def database_info(self): def database_info(self):
@ -20,29 +20,29 @@ class RedisTest(object):
for client in self.redis_conn.client_list(): for client in self.redis_conn.client_list():
ip_port.append(client["addr"]) ip_port.append(client["addr"])
print(ip_port) print(ip_port)
assert self.redis_conn.client_list(), "Database client's list is null" if not self.redis_conn.client_list():
raise Exception('Database client list is null')
return ip_port return ip_port
def test_insert_delete_data(self): def test_insert_delete_data(self):
key = "test" key = "test"
value = "it's working" value = "it's working"
result_set = self.redis_conn.set(key, value) result_set = self.redis_conn.set(key, value)
assert result_set, "Error: SET command failed" if not result_set: raise Exception('ERROR: SET command failed')
print("Successfully SET keyvalue pair") print("Successfully SET keyvalue pair")
result_get = self.redis_conn.get(key) result_get = self.redis_conn.get(key)
assert result_get, "Error: GET command failed" if not result_get: raise Exception('ERROR: GET command failed')
print("Successfully GET keyvalue pair") print("Successfully GET keyvalue pair")
db_size = self.redis_conn.dbsize() db_size = self.redis_conn.dbsize()
assert db_size > 0, "Database size not valid" if db_size <= 0: raise Exception("Database size not valid")
result_delete = self.redis_conn.delete(key) result_delete = self.redis_conn.delete(key)
assert result_delete == 1, "Error: Delete command failed" if not result_delete == 1: raise Exception("Error: Delete command failed")
print("Successfully DELETED keyvalue pair") print("Successfully DELETED keyvalue pair")
def test_client_kill(self, client_ip_port_list): def test_client_kill(self, client_ip_port_list):
for client_ip_port in client_ip_port_list: for client_ip_port in client_ip_port_list:
result = self.redis_conn.client_kill(client_ip_port) result = self.redis_conn.client_kill(client_ip_port)
print(result) if not result: raise Exception('Client failed to be removed')
assert result, "Client failed to be removed"
print("Successfully DELETED client") print("Successfully DELETED client")

View File

@ -34,11 +34,9 @@
name: openstack-helm-infra-bandit name: openstack-helm-infra-bandit
run: playbooks/osh-infra-bandit.yaml run: playbooks/osh-infra-bandit.yaml
nodeset: openstack-helm-single-node nodeset: openstack-helm-single-node
# Note(gagehugo): Uncomment this once it passes so that it only runs files:
# when python related files are changed. - ^.*\.py\.tpl$
# files: - ^.*\.py$
# - ^.*\.py\.tpl$
# - ^.*\.py$
- job: - job:
name: openstack-helm-infra name: openstack-helm-infra

View File

@ -19,8 +19,7 @@
check: check:
jobs: jobs:
- openstack-helm-lint - openstack-helm-lint
- openstack-helm-infra-bandit: - openstack-helm-infra-bandit
voting: false
- openstack-helm-infra-aio-logging - openstack-helm-infra-aio-logging
- openstack-helm-infra-aio-monitoring - openstack-helm-infra-aio-monitoring
- openstack-helm-infra-federated-monitoring: - openstack-helm-infra-federated-monitoring: