Merge "Updated probe tests"

commit a0a8b484cc
--- a/test/probe/common.py
+++ b/test/probe/common.py
@@ -13,22 +13,112 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from httplib import HTTPConnection
 from os import kill
 from signal import SIGTERM
 from subprocess import call, Popen
-from time import sleep
+from time import sleep, time
 
-from swift.common.bufferedhttp import http_connect_raw as http_connect
 from swiftclient import get_auth, head_account
 
 from swift.common.ring import Ring
 
 
-def kill_pids(pids):
-    for pid in pids.values():
-        try:
-            kill(pid, SIGTERM)
-        except Exception:
-            pass
+def start_server(port, port2server, pids, check=True):
+    server = port2server[port]
+    if server[:-1] in ('account', 'container', 'object'):
+        pids[server] = Popen([
+            'swift-%s-server' % server[:-1],
+            '/etc/swift/%s-server/%s.conf' % (server[:-1], server[-1])]).pid
+        if check:
+            return check_server(port, port2server, pids)
+    else:
+        pids[server] = Popen(['swift-%s-server' % server,
+                              '/etc/swift/%s-server.conf' % server]).pid
+        if check:
+            return check_server(port, port2server, pids)
+    return None
+
+
+def check_server(port, port2server, pids):
+    server = port2server[port]
+    if server[:-1] in ('account', 'container', 'object'):
+        path = '/connect/1/2'
+        if server[:-1] == 'container':
+            path += '/3'
+        elif server[:-1] == 'object':
+            path += '/3/4'
+        try_until = time() + 30
+        while True:
+            try:
+                conn = HTTPConnection('127.0.0.1', port)
+                conn.request('GET', path)
+                resp = conn.getresponse()
+                if resp.status != 404:
+                    raise Exception(
+                        'Unexpected status %s' % resp.status)
+                break
+            except Exception, err:
+                if time() > try_until:
+                    print err
+                    print 'Giving up on %s:%s after 30 seconds.' % (
+                        server, port)
+                    raise err
+                sleep(0.1)
+    else:
+        try_until = time() + 30
+        while True:
+            try:
+                url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
+                                      'test:tester', 'testing')
+                account = url.split('/')[-1]
+                head_account(url, token)
+                return url, token, account
+            except Exception, err:
+                if time() > try_until:
+                    print err
+                    print 'Giving up on proxy:8080 after 30 seconds.'
+                    raise err
+                sleep(0.1)
+    return None
+
+
+def kill_server(port, port2server, pids):
+    try:
+        kill(pids[port2server[port]], SIGTERM)
+    except Exception, err:
+        print err
+    try_until = time() + 30
+    while True:
+        try:
+            conn = HTTPConnection('127.0.0.1', port)
+            conn.request('GET', '/')
+            conn.getresponse()
+        except Exception, err:
+            break
+        if time() > try_until:
+            raise Exception(
+                'Still answering on port %s after 30 seconds' % port)
+        sleep(0.1)
+
+
+def kill_servers(port2server, pids):
+    for port in port2server:
+        kill_server(port, port2server, pids)
+
+
+def kill_nonprimary_server(primary_nodes, port2server, pids):
+    primary_ports = [n['port'] for n in primary_nodes]
+    for port, server in port2server.iteritems():
+        if port in primary_ports:
+            server_type = server[:-1]
+            break
+    else:
+        raise Exception('Cannot figure out server type for %r' % primary_nodes)
+    for port, server in list(port2server.iteritems()):
+        if server[:-1] == server_type and port not in primary_ports:
+            kill_server(port, port2server, pids)
+            return port
 
 
 def reset_environment():
@@ -36,68 +126,58 @@ def reset_environment():
     pids = {}
     try:
         port2server = {}
-        for s, p in (('account', 6002), ('container', 6001), ('object', 6000)):
-            for n in xrange(1, 5):
-                pids['%s%d' % (s, n)] = \
-                    Popen(['swift-%s-server' % s,
-                           '/etc/swift/%s-server/%d.conf' % (s, n)]).pid
-                port2server[p + (n * 10)] = '%s%d' % (s, n)
-        pids['proxy'] = Popen(['swift-proxy-server',
-                               '/etc/swift/proxy-server.conf']).pid
+        for server, port in [('account', 6002), ('container', 6001),
+                             ('object', 6000)]:
+            for number in xrange(1, 5):
+                port2server[port + (number * 10)] = '%s%d' % (server, number)
+        for port in port2server:
+            start_server(port, port2server, pids, check=False)
+        for port in port2server:
+            check_server(port, port2server, pids)
+        port2server[8080] = 'proxy'
+        url, token, account = start_server(8080, port2server, pids)
         account_ring = Ring('/etc/swift/account.ring.gz')
         container_ring = Ring('/etc/swift/container.ring.gz')
         object_ring = Ring('/etc/swift/object.ring.gz')
-        attempt = 0
-        while True:
-            attempt += 1
-            try:
-                url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
-                                      'test:tester', 'testing')
-                account = url.split('/')[-1]
-                head_account(url, token)
-                break
-            except Exception, err:
-                if attempt > 9:
-                    print err
-                    print 'Giving up after %s retries.' % attempt
-                    raise err
-                print err
-                print 'Retrying in 2 seconds...'
-                sleep(2)
-    except BaseException, err:
-        kill_pids(pids)
-        raise err
+    except BaseException:
+        try:
+            raise
+        finally:
+            try:
+                kill_servers(port2server, pids)
+            except Exception:
+                pass
     return pids, port2server, account_ring, container_ring, object_ring, url, \
         token, account
 
 
 def get_to_final_state():
-    ps = []
+    processes = []
     for job in ('account-replicator', 'container-replicator',
                 'object-replicator'):
-        for n in xrange(1, 5):
-            ps.append(Popen(['swift-%s' % job,
-                             '/etc/swift/%s-server/%d.conf' %
-                             (job.split('-')[0], n),
-                             'once']))
-    for p in ps:
-        p.wait()
-    ps = []
+        for number in xrange(1, 5):
+            processes.append(Popen([
+                'swift-%s' % job,
+                '/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
+                'once']))
+    for process in processes:
+        process.wait()
+    processes = []
     for job in ('container-updater', 'object-updater'):
-        for n in xrange(1, 5):
-            ps.append(Popen(['swift-%s' % job,
-                             '/etc/swift/%s-server/%d.conf' %
-                             (job.split('-')[0], n),
-                             'once']))
-    for p in ps:
-        p.wait()
-    ps = []
+        for number in xrange(1, 5):
+            processes.append(Popen([
+                'swift-%s' % job,
+                '/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
+                'once']))
+    for process in processes:
+        process.wait()
+    processes = []
     for job in ('account-replicator', 'container-replicator',
                 'object-replicator'):
-        for n in xrange(1, 5):
-            ps.append(Popen(['swift-%s' % job,
-                             '/etc/swift/%s-server/%d.conf' %
-                             (job.split('-')[0], n),
-                             'once']))
-    for p in ps:
-        p.wait()
+        for number in xrange(1, 5):
+            processes.append(Popen([
+                'swift-%s' % job,
+                '/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
+                'once']))
+    for process in processes:
+        process.wait()
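
For orientation, a minimal usage sketch (not part of the commit) of the helper API above. The port numbers are assumptions that follow the reset_environment() mapping: base port 6002/6001/6000 for account/container/object servers plus ten times the server number, with the proxy on 8080.

    from test.probe.common import (kill_server, kill_servers,
                                   reset_environment, start_server)

    (pids, port2server, account_ring, container_ring, object_ring, url,
     token, account) = reset_environment()
    try:
        # Stop account server 2 (port 6022) and wait until its port stops
        # answering.
        kill_server(6022, port2server, pids)
        # ... exercise the cluster while that node is down ...
        # Restart it; check_server() polls until the server responds again.
        start_server(6022, port2server, pids)
    finally:
        # What the updated tearDown() methods below call.
        kill_servers(port2server, pids)
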
--- a/test/probe/test_account_failures.py
+++ b/test/probe/test_account_failures.py
@@ -14,28 +14,46 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import unittest
-from os import kill
-from signal import SIGTERM
 from subprocess import Popen
-from time import sleep
+from unittest import main, TestCase
 
 from swiftclient import client
 
 from swift.common import direct_client
-from test.probe.common import get_to_final_state, kill_pids, reset_environment
+from test.probe.common import get_to_final_state, kill_nonprimary_server, \
+    kill_server, kill_servers, reset_environment, start_server
 
 
-class TestAccountFailures(unittest.TestCase):
+class TestAccountFailures(TestCase):
 
     def setUp(self):
-        self.pids, self.port2server, self.account_ring, self.container_ring, \
-            self.object_ring, self.url, self.token, self.account = \
-            reset_environment()
+        (self.pids, self.port2server, self.account_ring, self.container_ring,
+         self.object_ring, self.url, self.token,
+         self.account) = reset_environment()
 
     def tearDown(self):
-        kill_pids(self.pids)
+        kill_servers(self.port2server, self.pids)
 
     def test_main(self):
+        # Create container1 and container2
+        # Assert account level sees them
+        # Create container2/object1
+        # Assert account level doesn't see it yet
+        # Get to final state
+        # Assert account level now sees the container2/object1
+        # Kill account servers excepting two of the primaries
+        # Delete container1
+        # Assert account level knows container1 is gone but doesn't know about
+        # container2/object2 yet
+        # Put container2/object2
+        # Run container updaters
+        # Assert account level now knows about container2/object2
+        # Restart other primary account server
+        # Assert that server doesn't know about container1's deletion or the
+        # new container2/object2 yet
+        # Get to final state
+        # Assert that server is now up to date
+
         container1 = 'container1'
         client.put_container(self.url, self.token, container1)
         container2 = 'container2'
@@ -46,15 +64,15 @@ class TestAccountFailures(unittest.TestCase):
         self.assertEquals(headers['x-account-bytes-used'], '0')
         found1 = False
         found2 = False
-        for c in containers:
-            if c['name'] == container1:
+        for container in containers:
+            if container['name'] == container1:
                 found1 = True
-                self.assertEquals(c['count'], 0)
-                self.assertEquals(c['bytes'], 0)
-            elif c['name'] == container2:
+                self.assertEquals(container['count'], 0)
+                self.assertEquals(container['bytes'], 0)
+            elif container['name'] == container2:
                 found2 = True
-                self.assertEquals(c['count'], 0)
-                self.assertEquals(c['bytes'], 0)
+                self.assertEquals(container['count'], 0)
+                self.assertEquals(container['bytes'], 0)
         self.assert_(found1)
         self.assert_(found2)
 
@@ -65,15 +83,15 @@ class TestAccountFailures(unittest.TestCase):
         self.assertEquals(headers['x-account-bytes-used'], '0')
         found1 = False
         found2 = False
-        for c in containers:
-            if c['name'] == container1:
+        for container in containers:
+            if container['name'] == container1:
                 found1 = True
-                self.assertEquals(c['count'], 0)
-                self.assertEquals(c['bytes'], 0)
-            elif c['name'] == container2:
+                self.assertEquals(container['count'], 0)
+                self.assertEquals(container['bytes'], 0)
+            elif container['name'] == container2:
                 found2 = True
-                self.assertEquals(c['count'], 0)
-                self.assertEquals(c['bytes'], 0)
+                self.assertEquals(container['count'], 0)
+                self.assertEquals(container['bytes'], 0)
         self.assert_(found1)
         self.assert_(found2)
 
@@ -84,20 +102,21 @@ class TestAccountFailures(unittest.TestCase):
         self.assertEquals(headers['x-account-bytes-used'], '4')
         found1 = False
         found2 = False
-        for c in containers:
-            if c['name'] == container1:
+        for container in containers:
+            if container['name'] == container1:
                 found1 = True
-                self.assertEquals(c['count'], 0)
-                self.assertEquals(c['bytes'], 0)
-            elif c['name'] == container2:
+                self.assertEquals(container['count'], 0)
+                self.assertEquals(container['bytes'], 0)
+            elif container['name'] == container2:
                 found2 = True
-                self.assertEquals(c['count'], 1)
-                self.assertEquals(c['bytes'], 4)
+                self.assertEquals(container['count'], 1)
+                self.assertEquals(container['bytes'], 4)
         self.assert_(found1)
         self.assert_(found2)
 
         apart, anodes = self.account_ring.get_nodes(self.account)
-        kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)
+        kill_nonprimary_server(anodes, self.port2server, self.pids)
+        kill_server(anodes[0]['port'], self.port2server, self.pids)
 
         client.delete_container(self.url, self.token, container1)
         client.put_object(self.url, self.token, container2, 'object2', '12345')
@@ -107,46 +126,42 @@ class TestAccountFailures(unittest.TestCase):
         self.assertEquals(headers['x-account-bytes-used'], '4')
         found1 = False
         found2 = False
-        for c in containers:
-            if c['name'] == container1:
+        for container in containers:
+            if container['name'] == container1:
                 found1 = True
-            elif c['name'] == container2:
+            elif container['name'] == container2:
                 found2 = True
-                self.assertEquals(c['count'], 1)
-                self.assertEquals(c['bytes'], 4)
+                self.assertEquals(container['count'], 1)
+                self.assertEquals(container['bytes'], 4)
         self.assert_(not found1)
         self.assert_(found2)
 
-        ps = []
-        for n in xrange(1, 5):
-            ps.append(Popen(['swift-container-updater',
-                             '/etc/swift/container-server/%d.conf' % n,
-                             'once']))
-        for p in ps:
-            p.wait()
+        processes = []
+        for node in xrange(1, 5):
+            processes.append(Popen([
+                'swift-container-updater',
+                '/etc/swift/container-server/%d.conf' % node,
+                'once']))
+        for process in processes:
+            process.wait()
         headers, containers = client.get_account(self.url, self.token)
         self.assertEquals(headers['x-account-container-count'], '1')
         self.assertEquals(headers['x-account-object-count'], '2')
         self.assertEquals(headers['x-account-bytes-used'], '9')
         found1 = False
         found2 = False
-        for c in containers:
-            if c['name'] == container1:
+        for container in containers:
+            if container['name'] == container1:
                 found1 = True
-            elif c['name'] == container2:
+            elif container['name'] == container2:
                 found2 = True
-                self.assertEquals(c['count'], 2)
-                self.assertEquals(c['bytes'], 9)
+                self.assertEquals(container['count'], 2)
+                self.assertEquals(container['bytes'], 9)
         self.assert_(not found1)
         self.assert_(found2)
 
-        self.pids[self.port2server[anodes[0]['port']]] = \
-            Popen(['swift-account-server',
-                   '/etc/swift/account-server/%d.conf' %
-                   ((anodes[0]['port'] - 6002) / 10)]).pid
-        sleep(2)
-        # This is the earlier counts and bytes because the first node doesn't
-        # have the newest udpates yet.
+        start_server(anodes[0]['port'], self.port2server, self.pids)
+
         headers, containers = \
             direct_client.direct_get_account(anodes[0], apart, self.account)
         self.assertEquals(headers['x-account-container-count'], '2')
@@ -154,17 +169,13 @@ class TestAccountFailures(unittest.TestCase):
         self.assertEquals(headers['x-account-bytes-used'], '4')
         found1 = False
         found2 = False
-        for c in containers:
-            if c['name'] == container1:
+        for container in containers:
+            if container['name'] == container1:
                 found1 = True
-            elif c['name'] == container2:
+            elif container['name'] == container2:
                 found2 = True
-                # This is the earlier count and bytes because the first node
-                # doesn't have the newest udpates yet.
-                self.assertEquals(c['count'], 1)
-                self.assertEquals(c['bytes'], 4)
+                # This is okay because the first node hasn't got the update
+                # that container1 was deleted yet.
+                self.assertEquals(container['count'], 1)
+                self.assertEquals(container['bytes'], 4)
         self.assert_(found1)
         self.assert_(found2)
 
@@ -176,16 +187,16 @@ class TestAccountFailures(unittest.TestCase):
         self.assertEquals(headers['x-account-bytes-used'], '9')
         found1 = False
         found2 = False
-        for c in containers:
-            if c['name'] == container1:
+        for container in containers:
+            if container['name'] == container1:
                 found1 = True
-            elif c['name'] == container2:
+            elif container['name'] == container2:
                 found2 = True
-                self.assertEquals(c['count'], 2)
-                self.assertEquals(c['bytes'], 9)
+                self.assertEquals(container['count'], 2)
+                self.assertEquals(container['bytes'], 9)
         self.assert_(not found1)
         self.assert_(found2)
 
 
 if __name__ == '__main__':
-    unittest.main()
+    main()
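
The header assertions in this test are simple accounting; as a hedged summary (not part of the diff): object1 ('test', 4 bytes per the earlier assertions) and object2 ('12345', 5 bytes) both live in container2, and container1 has been deleted, so once get_to_final_state() has run every account server should report:

    assert headers['x-account-container-count'] == '1'  # only container2 left
    assert headers['x-account-object-count'] == '2'     # object1 + object2
    assert headers['x-account-bytes-used'] == '9'       # 4 + 5 bytes
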
--- a/test/probe/test_container_failures.py
+++ b/test/probe/test_container_failures.py
@@ -14,325 +14,103 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import unittest
-import os
-from os import kill
-from signal import SIGTERM
-from subprocess import Popen
-from time import sleep
+from os import listdir
+from os.path import join as path_join
+from unittest import main, TestCase
 from uuid import uuid4
-import eventlet
-import sqlite3
 
+from eventlet import GreenPool, Timeout
+from sqlite3 import connect
 from swiftclient import client
 
 from swift.common import direct_client
 from swift.common.utils import hash_path, readconf
 
-from test.probe.common import get_to_final_state, kill_pids, reset_environment
+from test.probe.common import get_to_final_state, kill_nonprimary_server, \
+    kill_server, kill_servers, reset_environment, start_server
 
 
-class TestContainerFailures(unittest.TestCase):
+def get_db_file_path(obj_dir):
+    files = sorted(listdir(obj_dir), reverse=True)
+    for filename in files:
+        if filename.endswith('db'):
+            return path_join(obj_dir, filename)
+
+
+class TestContainerFailures(TestCase):
 
     def setUp(self):
-        self.pids, self.port2server, self.account_ring, self.container_ring, \
-            self.object_ring, self.url, self.token, self.account = \
-            reset_environment()
+        (self.pids, self.port2server, self.account_ring, self.container_ring,
+         self.object_ring, self.url, self.token,
+         self.account) = reset_environment()
 
     def tearDown(self):
-        kill_pids(self.pids)
-
-    def test_first_node_fail(self):
-        container = 'container-%s' % uuid4()
-        client.put_container(self.url, self.token, container)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-
-        object1 = 'object1'
-        client.put_object(self.url, self.token, container, object1, 'test')
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
-        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
-
-        client.delete_object(self.url, self.token, container, object1)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        self.pids[self.port2server[cnodes[0]['port']]] = \
-            Popen(['swift-container-server',
-                   '/etc/swift/container-server/%d.conf' %
-                   ((cnodes[0]['port'] - 6001) / 10)]).pid
-        sleep(2)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        # This okay because the first node hasn't got the update that the
-        # object was deleted yet.
-        self.assert_(object1 in [o['name'] for o in
-                     direct_client.direct_get_container(cnodes[0], cpart,
-                     self.account, container)[1]])
-
-        # Unfortunately, the following might pass or fail, depending on the
-        # position of the account server associated with the first container
-        # server we had killed. If the associated happens to be the first
-        # account server, this'll pass, otherwise the first account server will
-        # serve the listing and not have the container.
-        # self.assert_(container in [c['name'] for c in
-        #              client.get_account(self.url, self.token)[1]])
-
-        object2 = 'object2'
-        # This will work because at least one (in this case, just one) account
-        # server has to indicate the container exists for the put to continue.
-        client.put_object(self.url, self.token, container, object2, 'test')
-        # First node still doesn't know object1 was deleted yet; this is okay.
-        self.assert_(object1 in [o['name'] for o in
-                     direct_client.direct_get_container(cnodes[0], cpart,
-                     self.account, container)[1]])
-        # And, of course, our new object2 exists.
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
+        kill_servers(self.port2server, self.pids)
 
+    def test_one_node_fails(self):
+        # Create container1
+        # Kill container1 servers excepting two of the primaries
+        # Delete container1
+        # Restart other container1 primary server
+        # Create container1/object1 (allowed because at least one server
+        # thinks the container exists)
+        # Get to a final state
+        # Assert all container1 servers indicate container1 is alive and
+        # well with object1
+        # Assert account level also indicates container1 is alive and
+        # well with object1
+        container1 = 'container-%s' % uuid4()
+        cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
+        client.put_container(self.url, self.token, container1)
+        kill_nonprimary_server(cnodes, self.port2server, self.pids)
+        kill_server(cnodes[0]['port'], self.port2server, self.pids)
+        client.delete_container(self.url, self.token, container1)
+        start_server(cnodes[0]['port'], self.port2server, self.pids)
+        client.put_object(self.url, self.token, container1, 'object1', '123')
+        get_to_final_state()
-        # Our container delete never "finalized" because we started using it
-        # before the delete settled.
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        # And, so our object2 should still exist and object1's delete should
-        # have finalized.
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-    def test_second_node_fail(self):
-        container = 'container-%s' % uuid4()
-        client.put_container(self.url, self.token, container)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-
-        object1 = 'object1'
-        client.put_object(self.url, self.token, container, object1, 'test')
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
-        kill(self.pids[self.port2server[cnodes[1]['port']]], SIGTERM)
-
-        client.delete_object(self.url, self.token, container, object1)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        self.pids[self.port2server[cnodes[1]['port']]] = \
-            Popen(['swift-container-server',
-                   '/etc/swift/container-server/%d.conf' %
-                   ((cnodes[1]['port'] - 6001) / 10)]).pid
-        sleep(2)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        # Unfortunately, the following might pass or fail, depending on the
-        # position of the account server associated with the first container
-        # server we had killed. If the associated happens to be the first
-        # account server, this'll pass, otherwise the first account server will
-        # serve the listing and not have the container.
-        # self.assert_(container in [c['name'] for c in
-        #              client.get_account(self.url, self.token)[1]])
-
-        object2 = 'object2'
-        # This will work because at least one (in this case, just one) account
-        # server has to indicate the container exists for the put to continue.
-        client.put_object(self.url, self.token, container, object2, 'test')
-        self.assert_(object1 not in [o['name'] for o in
-                     direct_client.direct_get_container(cnodes[0], cpart,
-                     self.account, container)[1]])
-        # And, of course, our new object2 exists.
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
+        for cnode in cnodes:
+            self.assertEquals(
+                [o['name'] for o in direct_client.direct_get_container(
+                    cnode, cpart, self.account, container1)[1]],
+                ['object1'])
+        headers, containers = client.get_account(self.url, self.token)
+        self.assertEquals(headers['x-account-container-count'], '1')
+        self.assertEquals(headers['x-account-object-count'], '1')
+        self.assertEquals(headers['x-account-bytes-used'], '3')
 
+    def test_two_nodes_fail(self):
+        # Create container1
+        # Kill container1 servers excepting one of the primaries
+        # Delete container1 directly to the one primary still up
+        # Restart other container1 servers
+        # Get to a final state
+        # Assert all container1 servers indicate container1 is gone (happens
+        # because the one node that knew about the delete replicated to the
+        # others.)
+        # Assert account level also indicates container1 is gone
+        container1 = 'container-%s' % uuid4()
+        cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
+        client.put_container(self.url, self.token, container1)
+        cnp_port = kill_nonprimary_server(cnodes, self.port2server, self.pids)
+        kill_server(cnodes[0]['port'], self.port2server, self.pids)
+        kill_server(cnodes[1]['port'], self.port2server, self.pids)
+        direct_client.direct_delete_container(cnodes[2], cpart, self.account,
+                                              container1)
+        start_server(cnodes[0]['port'], self.port2server, self.pids)
+        start_server(cnodes[1]['port'], self.port2server, self.pids)
+        start_server(cnp_port, self.port2server, self.pids)
+        get_to_final_state()
-        # Our container delete never "finalized" because we started using it
-        # before the delete settled.
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        # And, so our object2 should still exist and object1's delete should
-        # have finalized.
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-    def test_first_two_nodes_fail(self):
-        container = 'container-%s' % uuid4()
-        client.put_container(self.url, self.token, container)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-
-        object1 = 'object1'
-        client.put_object(self.url, self.token, container, object1, 'test')
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
-        for x in xrange(2):
-            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)
-
-        client.delete_object(self.url, self.token, container, object1)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        for x in xrange(2):
-            self.pids[self.port2server[cnodes[x]['port']]] = \
-                Popen(['swift-container-server',
-                       '/etc/swift/container-server/%d.conf' %
-                       ((cnodes[x]['port'] - 6001) / 10)]).pid
-        sleep(2)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        # This okay because the first node hasn't got the update that the
-        # object was deleted yet.
-        self.assert_(object1 in [o['name'] for o in
-                     direct_client.direct_get_container(cnodes[0], cpart,
-                     self.account, container)[1]])
-
-        # This fails because all three nodes have to indicate deletion before
-        # we tell the user it worked. Since the first node 409s (it hasn't got
-        # the update that the object was deleted yet), the whole must 503
-        # (until every is synced up, then the delete would work).
-        exc = None
-        try:
-            client.delete_container(self.url, self.token, container)
-        except client.ClientException, err:
-            exc = err
-        self.assert_(exc)
-        self.assert_(exc.http_status, 503)
+        for cnode in cnodes:
+            exc = None
+            try:
+                direct_client.direct_get_container(cnode, cpart, self.account,
+                                                   container1)
+            except client.ClientException, err:
+                exc = err
-        # Unfortunately, the following might pass or fail, depending on the
-        # position of the account server associated with the first container
-        # server we had killed. If the associated happens to be the first
-        # account server, this'll pass, otherwise the first account server will
-        # serve the listing and not have the container.
-        # self.assert_(container in [c['name'] for c in
-        #              client.get_account(self.url, self.token)[1]])
-
-        object2 = 'object2'
-        # This will work because at least one (in this case, just one) account
-        # server has to indicate the container exists for the put to continue.
-        client.put_object(self.url, self.token, container, object2, 'test')
-        # First node still doesn't know object1 was deleted yet; this is okay.
-        self.assert_(object1 in [o['name'] for o in
-                     direct_client.direct_get_container(cnodes[0], cpart,
-                     self.account, container)[1]])
-        # And, of course, our new object2 exists.
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        get_to_final_state()
-        # Our container delete never "finalized" because we started using it
-        # before the delete settled.
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        # And, so our object2 should still exist and object1's delete should
-        # have finalized.
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-    def test_last_two_nodes_fail(self):
-        container = 'container-%s' % uuid4()
-        client.put_container(self.url, self.token, container)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-
-        object1 = 'object1'
-        client.put_object(self.url, self.token, container, object1, 'test')
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
-        for x in (1, 2):
-            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)
-
-        client.delete_object(self.url, self.token, container, object1)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        for x in (1, 2):
-            self.pids[self.port2server[cnodes[x]['port']]] = \
-                Popen(['swift-container-server',
-                       '/etc/swift/container-server/%d.conf' %
-                       ((cnodes[x]['port'] - 6001) / 10)]).pid
-        sleep(2)
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        self.assert_(object1 not in [o['name'] for o in
-                     direct_client.direct_get_container(cnodes[0], cpart,
-                     self.account, container)[1]])
-
-        # This fails because all three nodes have to indicate deletion before
-        # we tell the user it worked. Since the first node 409s (it hasn't got
-        # the update that the object was deleted yet), the whole must 503
-        # (until every is synced up, then the delete would work).
-        exc = None
-        try:
-            client.delete_container(self.url, self.token, container)
-        except client.ClientException, err:
-            exc = err
-        self.assert_(exc)
-        self.assert_(exc.http_status, 503)
-        # Unfortunately, the following might pass or fail, depending on the
-        # position of the account server associated with the first container
-        # server we had killed. If the associated happens to be the first
-        # account server, this'll pass, otherwise the first account server will
-        # serve the listing and not have the container.
-        # self.assert_(container in [c['name'] for c in
-        #              client.get_account(self.url, self.token)[1]])
-
-        object2 = 'object2'
-        # This will work because at least one (in this case, just one) account
-        # server has to indicate the container exists for the put to continue.
-        client.put_object(self.url, self.token, container, object2, 'test')
-        self.assert_(object1 not in [o['name'] for o in
-                     direct_client.direct_get_container(cnodes[0], cpart,
-                     self.account, container)[1]])
-        # And, of course, our new object2 exists.
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-        get_to_final_state()
-        # Our container delete never "finalized" because we started using it
-        # before the delete settled.
-        self.assert_(container in [c['name'] for c in
-                     client.get_account(self.url, self.token)[1]])
-        # And, so our object2 should still exist and object1's delete should
-        # have finalized.
-        self.assert_(object1 not in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-        self.assert_(object2 in [o['name'] for o in
-                     client.get_container(self.url, self.token, container)[1]])
-
-    def _get_db_file_path(self, obj_dir):
-        files = sorted(os.listdir(obj_dir), reverse=True)
-        for file in files:
-            if file.endswith('db'):
-                return os.path.join(obj_dir, file)
+            self.assertEquals(exc.http_status, 404)
+        headers, containers = client.get_account(self.url, self.token)
+        self.assertEquals(headers['x-account-container-count'], '0')
+        self.assertEquals(headers['x-account-object-count'], '0')
+        self.assertEquals(headers['x-account-bytes-used'], '0')
 
     def _get_container_db_files(self, container):
         opart, onodes = self.container_ring.get_nodes(self.account, container)
@@ -348,7 +126,7 @@ class TestContainerFailures(unittest.TestCase):
             obj_dir = '%s/%s/containers/%s/%s/%s/' % (devices,
                                                       device, opart,
                                                       hash_str[-3:], hash_str)
-            db_files.append(self._get_db_file_path(obj_dir))
+            db_files.append(get_db_file_path(obj_dir))
 
         return db_files
 
@@ -360,29 +138,31 @@ class TestContainerFailures(TestCase):
             db_files = self._get_container_db_files(container)
             db_conns = []
            for i in range(num_locks):
-                db_conn = sqlite3.connect(db_files[i])
+                db_conn = connect(db_files[i])
                 db_conn.execute('begin exclusive transaction')
                 db_conns.append(db_conn)
             if catch_503:
+                exc = None
                 try:
                     client.delete_container(self.url, self.token, container)
-                except client.ClientException, e:
-                    self.assertEquals(e.http_status, 503)
+                except client.ClientException, err:
+                    exc = err
+                self.assertEquals(exc.http_status, 503)
             else:
                 client.delete_container(self.url, self.token, container)
 
-        pool = eventlet.GreenPool()
+        pool = GreenPool()
         try:
-            with eventlet.Timeout(15):
-                p = pool.spawn(run_test, 1, False)
-                r = pool.spawn(run_test, 2, True)
-                q = pool.spawn(run_test, 3, True)
+            with Timeout(15):
+                pool.spawn(run_test, 1, False)
+                pool.spawn(run_test, 2, True)
+                pool.spawn(run_test, 3, True)
                 pool.waitall()
-        except eventlet.Timeout, e:
+        except Timeout, err:
             raise Exception(
                 "The server did not return a 503 on container db locks, "
-                "it just hangs: %s" % e)
+                "it just hangs: %s" % err)
 
 
 if __name__ == '__main__':
-    unittest.main()
+    main()
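
A sketch (not from the commit) of the locking trick the db-lock test above relies on: holding an exclusive SQLite transaction on a container's .db file blocks that container server's updates, so a DELETE through the proxy must 503 once too many replicas are locked. The database path here is hypothetical.

    from sqlite3 import connect

    db_conn = connect('/srv/node/sdb1/containers/123/abc/somehash/somehash.db')
    db_conn.execute('begin exclusive transaction')
    try:
        pass  # issue client.delete_container(...) here and expect a 503
    finally:
        db_conn.rollback()  # releases the exclusive lock
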
--- a/test/probe/test_object_async_update.py
+++ b/test/probe/test_object_async_update.py
@@ -14,56 +14,57 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import unittest
-from os import kill
-from signal import SIGTERM
 from subprocess import Popen
-from time import sleep
+from unittest import main, TestCase
 from uuid import uuid4
 
 from swiftclient import client
 
 from swift.common import direct_client
-from test.probe.common import kill_pids, reset_environment
+from test.probe.common import kill_nonprimary_server, kill_server, \
+    kill_servers, reset_environment, start_server
 
 
-class TestObjectAsyncUpdate(unittest.TestCase):
+class TestObjectAsyncUpdate(TestCase):
 
     def setUp(self):
-        self.pids, self.port2server, self.account_ring, self.container_ring, \
-            self.object_ring, self.url, self.token, self.account = \
-            reset_environment()
+        (self.pids, self.port2server, self.account_ring, self.container_ring,
+         self.object_ring, self.url, self.token,
+         self.account) = reset_environment()
 
     def tearDown(self):
-        kill_pids(self.pids)
+        kill_servers(self.port2server, self.pids)
 
     def test_main(self):
+        # Create container
+        # Kill container servers excepting two of the primaries
+        # Create container/obj
+        # Restart other primary server
+        # Assert it does not know about container/obj
+        # Run the object-updaters
+        # Assert the other primary server now knows about container/obj
         container = 'container-%s' % uuid4()
         client.put_container(self.url, self.token, container)
         apart, anodes = self.account_ring.get_nodes(self.account)
         anode = anodes[0]
         cpart, cnodes = self.container_ring.get_nodes(self.account, container)
         cnode = cnodes[0]
-        kill(self.pids[self.port2server[cnode['port']]], SIGTERM)
+        kill_nonprimary_server(cnodes, self.port2server, self.pids)
+        kill_server(cnode['port'], self.port2server, self.pids)
         obj = 'object-%s' % uuid4()
         client.put_object(self.url, self.token, container, obj, '')
-        self.pids[self.port2server[cnode['port']]] = \
-            Popen(['swift-container-server',
-                   '/etc/swift/container-server/%d.conf' %
-                   ((cnode['port'] - 6001) / 10)]).pid
-        sleep(2)
-        self.assert_(not direct_client.direct_get_container(cnode, cpart,
-                     self.account, container)[1])
-        ps = []
-        for n in xrange(1, 5):
-            ps.append(Popen(['swift-object-updater',
-                             '/etc/swift/object-server/%d.conf' % n, 'once']))
-        for p in ps:
-            p.wait()
-        objs = [o['name'] for o in direct_client.direct_get_container(cnode,
-                cpart, self.account, container)[1]]
+        start_server(cnode['port'], self.port2server, self.pids)
+        self.assert_(not direct_client.direct_get_container(
+            cnode, cpart, self.account, container)[1])
+        processes = []
+        for node in xrange(1, 5):
+            processes.append(Popen(['swift-object-updater',
+                                    '/etc/swift/object-server/%d.conf' % node,
+                                    'once']))
+        for process in processes:
+            process.wait()
+        objs = [o['name'] for o in direct_client.direct_get_container(
+            cnode, cpart, self.account, container)[1]]
         self.assert_(obj in objs)
 
 
 if __name__ == '__main__':
-    unittest.main()
+    main()
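
Why the test runs swift-object-updater on all four nodes (a note, not part of the diff): with a container primary down, the object PUT leaves an async pending container update behind on the object servers; draining those is what makes the restarted container server learn about the object. The pattern, as used above:

    from subprocess import Popen

    processes = [Popen(['swift-object-updater',
                        '/etc/swift/object-server/%d.conf' % number, 'once'])
                 for number in xrange(1, 5)]
    for process in processes:
        process.wait()  # each updater replays its node's pending updates once
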
--- a/test/probe/test_object_failures.py
+++ b/test/probe/test_object_failures.py
@@ -14,31 +14,34 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import unittest
-import os
+from os import listdir, unlink
+from os.path import join as path_join
+from unittest import main, TestCase
 from uuid import uuid4
 
 from swiftclient import client
 
 from swift.common import direct_client
 from swift.common.utils import hash_path, readconf
 from swift.obj.server import write_metadata, read_metadata
-from test.probe.common import kill_pids, reset_environment
+from test.probe.common import kill_servers, reset_environment
 
 
-class TestObjectFailures(unittest.TestCase):
+def get_data_file_path(obj_dir):
+    files = sorted(listdir(obj_dir), reverse=True)
+    for filename in files:
+        return path_join(obj_dir, filename)
+
+
+class TestObjectFailures(TestCase):
 
     def setUp(self):
-        self.pids, self.port2server, self.account_ring, self.container_ring, \
-            self.object_ring, self.url, self.token, self.account = \
-            reset_environment()
+        (self.pids, self.port2server, self.account_ring, self.container_ring,
+         self.object_ring, self.url, self.token,
+         self.account) = reset_environment()
 
     def tearDown(self):
-        kill_pids(self.pids)
-
-    def _get_data_file_path(self, obj_dir):
-        files = sorted(os.listdir(obj_dir), reverse=True)
-        for file in files:
-            return os.path.join(obj_dir, file)
+        kill_servers(self.port2server, self.pids)
 
     def _setup_data_file(self, container, obj, data):
         client.put_container(self.url, self.token, container)
@@ -57,7 +60,7 @@ class TestObjectFailures(unittest.TestCase):
         obj_dir = '%s/%s/objects/%s/%s/%s/' % (devices,
                                                device, opart,
                                                hash_str[-3:], hash_str)
-        data_file = self._get_data_file_path(obj_dir)
+        data_file = get_data_file_path(obj_dir)
         return onode, opart, data_file
 
     def run_quarantine(self):
@@ -65,106 +68,102 @@ class TestObjectFailures(unittest.TestCase):
         obj = 'object-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj,
                                                         'VERIFY')
-        with open(data_file) as fp:
-            metadata = read_metadata(fp)
+        with open(data_file) as fpointer:
+            metadata = read_metadata(fpointer)
         metadata['ETag'] = 'badetag'
-        with open(data_file) as fp:
-            write_metadata(fp, metadata)
+        with open(data_file) as fpointer:
+            write_metadata(fpointer, metadata)
 
-        odata = direct_client.direct_get_object(onode, opart,
-                self.account, container, obj)[-1]
+        odata = direct_client.direct_get_object(
+            onode, opart, self.account, container, obj)[-1]
         self.assertEquals(odata, 'VERIFY')
         try:
-            resp = direct_client.direct_get_object(onode, opart, self.account,
-                                                   container, obj)
-            raise "Did not quarantine object"
-        except client.ClientException, e:
-            self.assertEquals(e.http_status, 404)
+            direct_client.direct_get_object(onode, opart, self.account,
+                                            container, obj)
+            raise Exception("Did not quarantine object")
+        except client.ClientException, err:
+            self.assertEquals(err.http_status, 404)
 
     def run_quarantine_range_etag(self):
         container = 'container-range-%s' % uuid4()
         obj = 'object-range-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj,
                                                         'RANGE')
-        with open(data_file) as fp:
-            metadata = read_metadata(fp)
+        with open(data_file) as fpointer:
+            metadata = read_metadata(fpointer)
         metadata['ETag'] = 'badetag'
-        with open(data_file) as fp:
-            write_metadata(fp, metadata)
+        with open(data_file) as fpointer:
+            write_metadata(fpointer, metadata)
         for header, result in [({'Range': 'bytes=0-2'}, 'RAN'),
                                ({'Range': 'bytes=1-11'}, 'ANGE'),
                                ({'Range': 'bytes=0-11'}, 'RANGE')]:
-            odata = direct_client.direct_get_object(onode, opart,
-                    self.account, container, obj,
-                    headers=header)[-1]
-
+            odata = direct_client.direct_get_object(
+                onode, opart, self.account, container, obj, headers=header)[-1]
             self.assertEquals(odata, result)
 
         try:
-            resp = direct_client.direct_get_object(onode, opart, self.account,
-                                                   container, obj)
-            raise "Did not quarantine object"
-        except client.ClientException, e:
-            self.assertEquals(e.http_status, 404)
+            direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)
+            raise Exception("Did not quarantine object")
+        except client.ClientException, err:
+            self.assertEquals(err.http_status, 404)
 
     def run_quarantine_zero_byte_get(self):
         container = 'container-zbyte-%s' % uuid4()
         obj = 'object-zbyte-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
-        with open(data_file) as fp:
-            metadata = read_metadata(fp)
-        os.unlink(data_file)
+        with open(data_file) as fpointer:
+            metadata = read_metadata(fpointer)
+        unlink(data_file)
 
-        with open(data_file, 'w') as fp:
-            write_metadata(fp, metadata)
+        with open(data_file, 'w') as fpointer:
+            write_metadata(fpointer, metadata)
         try:
-            resp = direct_client.direct_get_object(onode, opart, self.account,
-                                                   container, obj,
-                                                   conn_timeout=1,
-                                                   response_timeout=1)
-            raise "Did not quarantine object"
-        except client.ClientException, e:
-            self.assertEquals(e.http_status, 404)
+            direct_client.direct_get_object(onode, opart, self.account,
+                                            container, obj, conn_timeout=1,
+                                            response_timeout=1)
+            raise Exception("Did not quarantine object")
+        except client.ClientException, err:
+            self.assertEquals(err.http_status, 404)
 
     def run_quarantine_zero_byte_head(self):
         container = 'container-zbyte-%s' % uuid4()
         obj = 'object-zbyte-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
-        with open(data_file) as fp:
-            metadata = read_metadata(fp)
-        os.unlink(data_file)
+        with open(data_file) as fpointer:
+            metadata = read_metadata(fpointer)
+        unlink(data_file)
 
-        with open(data_file, 'w') as fp:
-            write_metadata(fp, metadata)
+        with open(data_file, 'w') as fpointer:
+            write_metadata(fpointer, metadata)
         try:
-            resp = direct_client.direct_head_object(onode, opart, self.account,
-                                                    container, obj,
-                                                    conn_timeout=1,
-                                                    response_timeout=1)
-            raise "Did not quarantine object"
-        except client.ClientException, e:
-            self.assertEquals(e.http_status, 404)
+            direct_client.direct_head_object(onode, opart, self.account,
                                             container, obj, conn_timeout=1,
+                                             response_timeout=1)
+            raise Exception("Did not quarantine object")
+        except client.ClientException, err:
+            self.assertEquals(err.http_status, 404)
 
     def run_quarantine_zero_byte_post(self):
         container = 'container-zbyte-%s' % uuid4()
         obj = 'object-zbyte-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
-        with open(data_file) as fp:
-            metadata = read_metadata(fp)
-        os.unlink(data_file)
+        with open(data_file) as fpointer:
+            metadata = read_metadata(fpointer)
+        unlink(data_file)
 
-        with open(data_file, 'w') as fp:
-            write_metadata(fp, metadata)
+        with open(data_file, 'w') as fpointer:
+            write_metadata(fpointer, metadata)
         try:
-            resp = direct_client.direct_post_object(
+            direct_client.direct_post_object(
                 onode, opart, self.account,
                 container, obj,
                 {'X-Object-Meta-1': 'One', 'X-Object-Meta-Two': 'Two'},
                 conn_timeout=1,
                 response_timeout=1)
-            raise "Did not quarantine object"
-        except client.ClientException, e:
-            self.assertEquals(e.http_status, 404)
+            raise Exception("Did not quarantine object")
+        except client.ClientException, err:
+            self.assertEquals(err.http_status, 404)
 
     def test_runner(self):
         self.run_quarantine()
@@ -173,5 +172,6 @@ class TestObjectFailures(unittest.TestCase):
         self.run_quarantine_zero_byte_head()
         self.run_quarantine_zero_byte_post()
 
 
 if __name__ == '__main__':
-    unittest.main()
+    main()
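
The corruption recipe the quarantine tests share, restated as a sketch (not part of the diff): read_metadata/write_metadata operate on the metadata stored alongside the object's on-disk file, and an ETag that no longer matches the body makes the object server quarantine the file, so the next direct GET/HEAD/POST 404s. Here data_file stands in for the value returned by _setup_data_file().

    from swift.obj.server import read_metadata, write_metadata

    with open(data_file) as fpointer:
        metadata = read_metadata(fpointer)
    metadata['ETag'] = 'badetag'  # guaranteed mismatch with the content
    with open(data_file) as fpointer:
        write_metadata(fpointer, metadata)
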
@ -14,33 +14,55 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
from os import kill
|
||||
from signal import SIGTERM
|
||||
from subprocess import call, Popen
|
||||
from time import sleep
|
||||
from unittest import main, TestCase
|
||||
from uuid import uuid4
|
||||
|
||||
from swiftclient import client
|
||||
|
||||
from swift.common import direct_client
|
||||
|
||||
from test.probe.common import kill_pids, reset_environment
|
||||
from test.probe.common import kill_server, kill_servers, reset_environment, \
|
||||
start_server
|
||||
|
||||
|
||||
class TestObjectHandoff(unittest.TestCase):
|
||||
class TestObjectHandoff(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.pids, self.port2server, self.account_ring, self.container_ring, \
|
||||
self.object_ring, self.url, self.token, self.account = \
|
||||
reset_environment()
|
||||
(self.pids, self.port2server, self.account_ring, self.container_ring,
|
||||
self.object_ring, self.url, self.token,
|
||||
self.account) = reset_environment()
|
||||
|
||||
def tearDown(self):
|
||||
kill_pids(self.pids)
|
||||
kill_servers(self.port2server, self.pids)
|
||||
|
||||
def test_main(self):
|
||||
# Create container
|
||||
# Kill one container/obj primary server
|
||||
# Create container/obj (goes to two primary servers and one handoff)
|
||||
# Kill other two container/obj primary servers
|
||||
# Indirectly through proxy assert we can get container/obj
|
||||
# Restart those other two container/obj primary servers
|
||||
# Directly to handoff server assert we can get container/obj
|
||||
# Assert container listing (via proxy and directly) has container/obj
|
||||
# Bring the first container/obj primary server back up
|
||||
# Assert that it doesn't have container/obj yet
|
||||
# Run object replication, ensuring we run the handoff node last so it
|
||||
# should remove its extra handoff partition
|
||||
# Assert the first container/obj primary server now has container/obj
|
||||
# Assert the handoff server no longer has container/obj
|
||||
# Kill the first container/obj primary server again (we have two
|
||||
# primaries and the handoff up now)
|
||||
# Delete container/obj
|
||||
# Assert we can't head container/obj
|
||||
# Assert container/obj is not in the container listing, both indirectly
|
||||
# and directly
|
||||
# Restart the first container/obj primary server again
|
||||
# Assert it still has container/obj
|
||||
# Run object replication, ensuring we run the handoff node last so it
|
||||
# should remove its extra handoff partition
|
||||
# Assert primary node no longer has container/obj
|
||||
container = 'container-%s' % uuid4()
|
||||
client.put_container(self.url, self.token, container)
|
||||
apart, anodes = self.account_ring.get_nodes(self.account)
|
||||
|
||||
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
|
||||
cnode = cnodes[0]
|
||||
@ -48,7 +70,7 @@ class TestObjectHandoff(unittest.TestCase):
|
||||
opart, onodes = self.object_ring.get_nodes(
|
||||
self.account, container, obj)
|
||||
onode = onodes[0]
|
||||
kill(self.pids[self.port2server[onode['port']]], SIGTERM)
|
||||
kill_server(onode['port'], self.port2server, self.pids)
|
||||
client.put_object(self.url, self.token, container, obj, 'VERIFY')
|
||||
odata = client.get_object(self.url, self.token, container, obj)[-1]
|
||||
if odata != 'VERIFY':
|
||||
@ -56,22 +78,18 @@ class TestObjectHandoff(unittest.TestCase):
|
||||
'returned: %s' % repr(odata))
|
||||
# Kill all primaries to ensure GET handoff works
|
||||
for node in onodes[1:]:
|
||||
kill(self.pids[self.port2server[node['port']]], SIGTERM)
|
||||
kill_server(node['port'], self.port2server, self.pids)
|
||||
odata = client.get_object(self.url, self.token, container, obj)[-1]
|
||||
if odata != 'VERIFY':
|
||||
raise Exception('Object GET did not return VERIFY, instead it '
|
||||
'returned: %s' % repr(odata))
|
||||
for node in onodes[1:]:
|
||||
self.pids[self.port2server[node['port']]] = Popen([
|
||||
'swift-object-server',
|
||||
'/etc/swift/object-server/%d.conf' %
|
||||
((node['port'] - 6000) / 10)]).pid
|
||||
sleep(2)
|
||||
start_server(node['port'], self.port2server, self.pids)
|
||||
# We've indirectly verified the handoff node has the object, but let's
|
||||
# directly verify it.
|
||||
another_onode = self.object_ring.get_more_nodes(opart).next()
|
||||
odata = direct_client.direct_get_object(another_onode, opart,
|
||||
self.account, container, obj)[-1]
|
||||
odata = direct_client.direct_get_object(
|
||||
another_onode, opart, self.account, container, obj)[-1]
|
||||
if odata != 'VERIFY':
|
||||
raise Exception('Direct object GET did not return VERIFY, instead '
|
||||
'it returned: %s' % repr(odata))
|
||||
@ -81,33 +99,28 @@ class TestObjectHandoff(unittest.TestCase):
|
||||
raise Exception('Container listing did not know about object')
|
||||
for cnode in cnodes:
|
||||
objs = [o['name'] for o in
|
||||
direct_client.direct_get_container(cnode, cpart,
|
||||
self.account, container)[1]]
|
||||
direct_client.direct_get_container(
|
||||
cnode, cpart, self.account, container)[1]]
|
||||
if obj not in objs:
|
||||
raise Exception(
|
||||
'Container server %s:%s did not know about object' %
|
||||
(cnode['ip'], cnode['port']))
|
||||
self.pids[self.port2server[onode['port']]] = Popen([
|
||||
'swift-object-server',
|
||||
'/etc/swift/object-server/%d.conf' %
|
||||
((onode['port'] - 6000) / 10)]).pid
|
||||
sleep(2)
|
||||
exc = False
|
||||
start_server(onode['port'], self.port2server, self.pids)
|
||||
exc = None
|
||||
try:
|
||||
direct_client.direct_get_object(onode, opart, self.account,
|
||||
container, obj)
|
||||
except Exception:
|
||||
exc = True
|
||||
if not exc:
|
||||
raise Exception('Previously downed object server had test object')
|
||||
except direct_client.ClientException, err:
|
||||
exc = err
|
||||
self.assertEquals(exc.http_status, 404)
|
||||
# Run the extra server last so it'll remove its extra partition
|
||||
ps = []
|
||||
for n in onodes:
|
||||
ps.append(Popen(['swift-object-replicator',
|
||||
processes = []
|
||||
for node in onodes:
|
||||
processes.append(Popen(['swift-object-replicator',
|
||||
'/etc/swift/object-server/%d.conf' %
|
||||
((n['port'] - 6000) / 10), 'once']))
|
||||
for p in ps:
|
||||
p.wait()
|
||||
((node['port'] - 6000) / 10), 'once']))
|
||||
for process in processes:
|
||||
process.wait()
|
||||
call(['swift-object-replicator',
|
||||
'/etc/swift/object-server/%d.conf' %
|
||||
((another_onode['port'] - 6000) / 10), 'once'])
@ -116,68 +129,22 @@ class TestObjectHandoff(unittest.TestCase):
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        exc = False
        exc = None
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had test object')
        except direct_client.ClientException, err:
            exc = err
        self.assertEquals(exc.http_status, 404)

        # Because POST has changed to a COPY by default, POSTs will succeed on all up
        # nodes now if at least one up node has the object.
        # kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        # client.post_object(self.url, self.token, container, obj,
        #                    headers={'x-object-meta-probe': 'value'})
        # oheaders = client.head_object(self.url, self.token, container, obj)
        # if oheaders.get('x-object-meta-probe') != 'value':
        #     raise Exception('Metadata incorrect, was %s' % repr(oheaders))
        # exc = False
        # try:
        #     direct_client.direct_get_object(another_onode, opart, self.account,
        #                                     container, obj)
        # except Exception:
        #     exc = True
        # if not exc:
        #     raise Exception('Handoff server claimed it had the object when '
        #                     'it should not have it')
        # self.pids[self.port2server[onode['port']]] = Popen([
        #     'swift-object-server',
        #     '/etc/swift/object-server/%d.conf' %
        #     ((onode['port'] - 6000) / 10)]).pid
        # sleep(2)
        # oheaders = direct_client.direct_get_object(onode, opart, self.account,
        #                                            container, obj)[0]
        # if oheaders.get('x-object-meta-probe') == 'value':
        #     raise Exception('Previously downed object server had the new '
        #                     'metadata when it should not have it')
        # # Run the extra server last so it'll remove its extra partition
        # ps = []
        # for n in onodes:
        #     ps.append(Popen(['swift-object-replicator',
        #                      '/etc/swift/object-server/%d.conf' %
        #                      ((n['port'] - 6000) / 10), 'once']))
        # for p in ps:
        #     p.wait()
        # call(['swift-object-replicator',
        #       '/etc/swift/object-server/%d.conf' %
        #       ((another_onode['port'] - 6000) / 10), 'once'])
        # oheaders = direct_client.direct_get_object(onode, opart, self.account,
        #                                            container, obj)[0]
        # if oheaders.get('x-object-meta-probe') != 'value':
        #     raise Exception(
        #         'Previously downed object server did not have the new metadata')
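
The block above is commented out because of the behavior change its first two lines describe: when the proxy treats a POST as a COPY, a metadata POST re-copies the object through any up node that has it, so downing one object server no longer produces the stale-metadata situation this block used to check for. In proxies of roughly this era that behavior was switchable in proxy-server.conf; the option name and default in the sketch below are assumptions, not taken from this commit:

# /etc/swift/proxy-server.conf (sketch; object_post_as_copy is assumed to be
# the era's option name, defaulting to true, i.e. POST-as-COPY behavior)
[app:proxy-server]
use = egg:swift#proxy
object_post_as_copy = true
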

        kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        kill_server(onode['port'], self.port2server, self.pids)
        client.delete_object(self.url, self.token, container, obj)
        exc = False
        exc = None
        try:
            client.head_object(self.url, self.token, container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Regular object HEAD was still successful')
        except direct_client.ClientException, err:
            exc = err
        self.assertEquals(exc.http_status, 404)
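
This capture-and-assert shape, storing the ClientException and then asserting on its http_status, replaces the older boolean-flag pattern shown beside it, which could not distinguish the expected 404 from any other failure. A small illustration of the difference; fetch() below is a hypothetical stand-in for head_object or direct_get_object:

# Sketch: why capturing the exception beats the old True/False flag.
from swiftclient.client import ClientException

def fetch():
    # Hypothetical raiser standing in for a real HEAD/GET against a node.
    raise ClientException('not found', http_status=404)

exc = None
try:
    fetch()
except ClientException, err:
    exc = err
assert exc.http_status == 404  # a 500 or a timeout now fails loudly
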
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
@ -190,33 +157,28 @@ class TestObjectHandoff(unittest.TestCase):
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))
        self.pids[self.port2server[onode['port']]] = Popen([
            'swift-object-server',
            '/etc/swift/object-server/%d.conf' %
            ((onode['port'] - 6000) / 10)]).pid
        sleep(2)
        start_server(onode['port'], self.port2server, self.pids)
        direct_client.direct_get_object(onode, opart, self.account, container,
                                        obj)
        # Run the extra server last so it'll remove its extra partition
        ps = []
        for n in onodes:
            ps.append(Popen(['swift-object-replicator',
        processes = []
        for node in onodes:
            processes.append(Popen(['swift-object-replicator',
                '/etc/swift/object-server/%d.conf' %
                ((n['port'] - 6000) / 10), 'once']))
        for p in ps:
            p.wait()
                ((node['port'] - 6000) / 10), 'once']))
        for process in processes:
            process.wait()
        call(['swift-object-replicator',
              '/etc/swift/object-server/%d.conf' %
              ((another_onode['port'] - 6000) / 10), 'once'])
        exc = False
        exc = None
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had the object')
        except direct_client.ClientException, err:
            exc = err
        self.assertEquals(exc.http_status, 404)


if __name__ == '__main__':
    unittest.main()
    main()

@ -1,130 +0,0 @@
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from os import kill
from signal import SIGTERM
from subprocess import Popen
from time import sleep

from swiftclient import client

from test.probe.common import get_to_final_state, kill_pids, reset_environment


class TestRunningWithEachTypeDown(unittest.TestCase):

    def setUp(self):
        self.pids, self.port2server, self.account_ring, self.container_ring, \
            self.object_ring, self.url, self.token, self.account = \
                reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_main(self):
        # TODO: This test "randomly" passes or fails; need to find out why
        return
        apart, anodes = self.account_ring.get_nodes(self.account)
        kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)
        cpart, cnodes = \
            self.container_ring.get_nodes(self.account, 'container1')
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
        opart, onodes = \
            self.object_ring.get_nodes(self.account, 'container1', 'object1')
        kill(self.pids[self.port2server[onodes[0]['port']]], SIGTERM)
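
Each get_nodes call returns a (partition, node-list) pair, and the test kills the first primary from each of the three rings so that every layer (account, container, object) runs one node short. The same step, sketched in isolation; the ring, pids, port2server and account names are assumed to come from a reset_environment-style fixture:

# Sketch: down the first primary of each ring. Fixture names are assumed;
# 'container1'/'object1' match the test's fixed names.
from os import kill
from signal import SIGTERM

for ring, extra_args in ((account_ring, ()),
                         (container_ring, ('container1',)),
                         (object_ring, ('container1', 'object1'))):
    part, nodes = ring.get_nodes(account, *extra_args)
    kill(pids[port2server[nodes[0]['port']]], SIGTERM)
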

        try:
            client.put_container(self.url, self.token, 'container1')
        except client.ClientException, err:
            # This might 503 if one of the up container nodes tries to update
            # the down account node. It'll still be saved on one node, but we
            # can't assure the user.
            pass
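
The swallowed exception here is deliberate: the container PUT can come back 503 when a container server cannot reach the one account server that is down, even though the container row was durably written on the up nodes. A sketch of a more explicit version of the same tolerance, which only forgives the expected status; url/token stand in for the test's self.url/self.token:

# Sketch: tolerate only the expected 503 instead of swallowing everything.
try:
    client.put_container(url, token, 'container1')
except client.ClientException, err:
    # The account update may not be acknowledged with a node down, but the
    # container is still saved on the reachable nodes.
    if err.http_status != 503:
        raise
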
        client.put_object(self.url, self.token, 'container1', 'object1', '1234')
        get_to_final_state()
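
get_to_final_state, imported from test.probe.common, forces eventual consistency to happen now instead of waiting on the background daemons. Its real contents are not shown in this commit, so the sketch below is only an assumed general shape (run each consistency daemon once per server so async pendings and unreplicated rows settle), not a copy of the helper:

# Sketch of the idea behind get_to_final_state; the job list and ordering
# are assumptions, not the helper's actual contents.
from subprocess import Popen

def settle(jobs=('object-updater', 'container-updater',
                 'account-replicator', 'container-replicator',
                 'object-replicator')):
    for job in jobs:
        server = job.split('-')[0]
        procs = [Popen(['swift-%s' % job,
                        '/etc/swift/%s-server/%d.conf' % (server, n), 'once'])
                 for n in (1, 2, 3, 4)]
        for p in procs:
            p.wait()
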
        headers, containers = client.head_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token, 'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)

        self.pids[self.port2server[anodes[0]['port']]] = \
            Popen(['swift-account-server',
                   '/etc/swift/account-server/%d.conf' %
                   ((anodes[0]['port'] - 6002) / 10)]).pid
        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['swift-container-server',
                   '/etc/swift/container-server/%d.conf' %
                   ((cnodes[0]['port'] - 6001) / 10)]).pid
        self.pids[self.port2server[onodes[0]['port']]] = \
            Popen(['swift-object-server',
                   '/etc/swift/object-server/%d.conf' %
                   ((onodes[0]['port'] - 6000) / 10)]).pid
        sleep(2)
        headers, containers = client.head_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
        # The account node was previously down.
        self.assert_(not found1)
        found1 = False
        for obj in client.get_container(self.url, self.token, 'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        # The first container node 404s, but the proxy will try the next node
        # and succeed.
        self.assert_(found1)
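
That closing comment describes the proxy's read-path failover: a 404 from the container replica that was down simply moves the proxy on to the next replica, so the listing still succeeds. One way to observe the same thing without the proxy, sketched with direct_client against the cpart/cnodes already computed in this test:

# Sketch: HEAD each container replica directly; with one node freshly
# restarted, expect at least one 404 while the proxy-level GET still works.
# cpart/cnodes/account are assumed from the surrounding test.
from swift.common import direct_client

statuses = []
for cnode in cnodes:
    try:
        direct_client.direct_head_container(cnode, cpart, account,
                                            'container1')
        statuses.append(200)
    except direct_client.ClientException, err:
        statuses.append(err.http_status)
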

        get_to_final_state()
        headers, containers = client.head_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token, 'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)


if __name__ == '__main__':
    unittest.main()