Add tox configuration and fix flake8 errors
This change adds a tests structure. Change-Id: I62f6c238de5c55b7673ae8b38a6ededdd77f4d4b
This commit is contained in:
parent
813c1af44e
commit
6d2894de8b
3
.stestr.conf
Normal file
3
.stestr.conf
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
test_path=tests/
|
||||||
|
top_dir=./
|
@ -56,9 +56,15 @@
|
|||||||
check:
|
check:
|
||||||
jobs:
|
jobs:
|
||||||
- zuul-registry-build-image
|
- zuul-registry-build-image
|
||||||
|
- tox-pep8
|
||||||
|
- tox-py37:
|
||||||
|
nodeset: fedora-latest
|
||||||
gate:
|
gate:
|
||||||
jobs:
|
jobs:
|
||||||
- zuul-registry-upload-image
|
- zuul-registry-upload-image
|
||||||
|
- tox-pep8
|
||||||
|
- tox-py37:
|
||||||
|
nodeset: fedora-latest
|
||||||
promote:
|
promote:
|
||||||
jobs:
|
jobs:
|
||||||
- zuul-registry-promote-image
|
- zuul-registry-promote-image
|
||||||
|
2
test-requirements.txt
Normal file
2
test-requirements.txt
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
flake8
|
||||||
|
stestr
|
13
tests/__init__.py
Normal file
13
tests/__init__.py
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
# Copyright 2019 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
24
tests/test_filesystem.py
Normal file
24
tests/test_filesystem.py
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
# Copyright 2019 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# This module is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This software is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import testtools
|
||||||
|
|
||||||
|
from zuul_registry.filesystem import FilesystemDriver
|
||||||
|
|
||||||
|
|
||||||
|
class TestFilesystemDriver(testtools.TestCase):
|
||||||
|
def test_list_objects(self):
|
||||||
|
driver = FilesystemDriver({'root': '.'})
|
||||||
|
print(driver.list_objects("."))
|
29
tox.ini
Normal file
29
tox.ini
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
[tox]
|
||||||
|
minversion = 3.2
|
||||||
|
skipsdist = True
|
||||||
|
envlist = pep8,py37
|
||||||
|
|
||||||
|
[testenv]
|
||||||
|
basepython = python3
|
||||||
|
usedevelop = True
|
||||||
|
deps =
|
||||||
|
-r{toxinidir}/requirements.txt
|
||||||
|
-r{toxinidir}/test-requirements.txt
|
||||||
|
commands =
|
||||||
|
stestr run {posargs}
|
||||||
|
stestr slowest
|
||||||
|
|
||||||
|
[testenv:pep8]
|
||||||
|
install_command = pip install {opts} {packages}
|
||||||
|
commands =
|
||||||
|
flake8 {posargs}
|
||||||
|
|
||||||
|
[testenv:venv]
|
||||||
|
commands = {posargs}
|
||||||
|
|
||||||
|
[flake8]
|
||||||
|
# These are ignored intentionally in zuul projects;
|
||||||
|
# please don't submit patches that solely correct them or enable them.
|
||||||
|
ignore = E124,E125,E129,E252,E402,E741,H,W503,W504
|
||||||
|
show-source = True
|
||||||
|
exclude = .venv,.tox,dist,doc,build,*.egg,node_modules
|
@ -14,16 +14,15 @@
|
|||||||
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import time
|
|
||||||
|
|
||||||
from . import storageutils
|
from . import storageutils
|
||||||
|
|
||||||
|
|
||||||
class FilesystemDriver(storageutils.StorageDriver):
|
class FilesystemDriver(storageutils.StorageDriver):
|
||||||
def __init__(self, conf):
|
def __init__(self, conf):
|
||||||
self.root = conf['root']
|
self.root = conf['root']
|
||||||
|
|
||||||
def list_objects(self, path):
|
def list_objects(self, path):
|
||||||
now = time.time()
|
|
||||||
path = os.path.join(self.root, path)
|
path = os.path.join(self.root, path)
|
||||||
if not os.path.isdir(path):
|
if not os.path.isdir(path):
|
||||||
return []
|
return []
|
||||||
@ -88,4 +87,5 @@ class FilesystemDriver(storageutils.StorageDriver):
|
|||||||
chunk_path = os.path.join(self.root, chunk_path)
|
chunk_path = os.path.join(self.root, chunk_path)
|
||||||
os.unlink(chunk_path)
|
os.unlink(chunk_path)
|
||||||
|
|
||||||
|
|
||||||
Driver = FilesystemDriver
|
Driver = FilesystemDriver
|
||||||
|
@ -31,6 +31,7 @@ DRIVERS = {
|
|||||||
'swift': swift.Driver,
|
'swift': swift.Driver,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class Authorization:
|
class Authorization:
|
||||||
def __init__(self, users):
|
def __init__(self, users):
|
||||||
self.ro = {}
|
self.ro = {}
|
||||||
@ -52,6 +53,7 @@ class Authorization:
|
|||||||
return False
|
return False
|
||||||
return store[user] == password
|
return store[user] == password
|
||||||
|
|
||||||
|
|
||||||
class RegistryAPI:
|
class RegistryAPI:
|
||||||
"""Registry API server.
|
"""Registry API server.
|
||||||
|
|
||||||
@ -125,9 +127,10 @@ class RegistryAPI:
|
|||||||
method = cherrypy.request.method
|
method = cherrypy.request.method
|
||||||
uuid = self.storage.start_upload(namespace)
|
uuid = self.storage.start_upload(namespace)
|
||||||
self.log.info('Start upload %s %s uuid %s digest %s',
|
self.log.info('Start upload %s %s uuid %s digest %s',
|
||||||
method, repository, uuid, digest)
|
method, repository, uuid, digest)
|
||||||
res = cherrypy.response
|
res = cherrypy.response
|
||||||
res.headers['Location'] = '/v2/%s/blobs/uploads/%s' % (repository, uuid)
|
res.headers['Location'] = '/v2/%s/blobs/uploads/%s' % (
|
||||||
|
repository, uuid)
|
||||||
res.headers['Docker-Upload-UUID'] = uuid
|
res.headers['Docker-Upload-UUID'] = uuid
|
||||||
res.headers['Range'] = '0-0'
|
res.headers['Range'] = '0-0'
|
||||||
res.status = '202 Accepted'
|
res.status = '202 Accepted'
|
||||||
@ -140,11 +143,13 @@ class RegistryAPI:
|
|||||||
old_length, new_length = self.storage.upload_chunk(
|
old_length, new_length = self.storage.upload_chunk(
|
||||||
namespace, uuid, cherrypy.request.body)
|
namespace, uuid, cherrypy.request.body)
|
||||||
res = cherrypy.response
|
res = cherrypy.response
|
||||||
res.headers['Location'] = '/v2/%s/blobs/uploads/%s' % (repository, uuid)
|
res.headers['Location'] = '/v2/%s/blobs/uploads/%s' % (
|
||||||
|
repository, uuid)
|
||||||
res.headers['Docker-Upload-UUID'] = uuid
|
res.headers['Docker-Upload-UUID'] = uuid
|
||||||
res.headers['Range'] = '0-%s' % (new_length,)
|
res.headers['Range'] = '0-%s' % (new_length,)
|
||||||
res.status = '204 No Content'
|
res.status = '204 No Content'
|
||||||
self.log.info('Finish Upload chunk %s %s %s', repository, uuid, new_length)
|
self.log.info(
|
||||||
|
'Finish Upload chunk %s %s %s', repository, uuid, new_length)
|
||||||
|
|
||||||
@cherrypy.expose
|
@cherrypy.expose
|
||||||
@cherrypy.config(**{'tools.auth_basic.checkpassword': require_write})
|
@cherrypy.config(**{'tools.auth_basic.checkpassword': require_write})
|
||||||
@ -217,8 +222,10 @@ class RegistryAPI:
|
|||||||
self.log.error('Manifest %s %s not found', repository, ref)
|
self.log.error('Manifest %s %s not found', repository, ref)
|
||||||
return self.not_found()
|
return self.not_found()
|
||||||
|
|
||||||
|
|
||||||
class RegistryServer:
|
class RegistryServer:
|
||||||
log = logging.getLogger("registry.server")
|
log = logging.getLogger("registry.server")
|
||||||
|
|
||||||
def __init__(self, config_path):
|
def __init__(self, config_path):
|
||||||
self.log.info("Loading config from %s", config_path)
|
self.log.info("Loading config from %s", config_path)
|
||||||
self._load_config(config_path)
|
self._load_config(config_path)
|
||||||
@ -297,6 +304,7 @@ class RegistryServer:
|
|||||||
def prune(self):
|
def prune(self):
|
||||||
self.store.prune()
|
self.store.prune()
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
description='Zuul registry server')
|
description='Zuul registry server')
|
||||||
@ -321,7 +329,7 @@ def main():
|
|||||||
logging.getLogger("urllib3").setLevel(logging.DEBUG)
|
logging.getLogger("urllib3").setLevel(logging.DEBUG)
|
||||||
logging.getLogger("stevedore").setLevel(logging.INFO)
|
logging.getLogger("stevedore").setLevel(logging.INFO)
|
||||||
logging.getLogger("openstack").setLevel(logging.DEBUG)
|
logging.getLogger("openstack").setLevel(logging.DEBUG)
|
||||||
#cherrypy.log.error_log.propagate = False
|
# cherrypy.log.error_log.propagate = False
|
||||||
|
|
||||||
s = RegistryServer(args.config)
|
s = RegistryServer(args.config)
|
||||||
if args.command == 'serve':
|
if args.command == 'serve':
|
||||||
|
@ -23,6 +23,7 @@ import threading
|
|||||||
import time
|
import time
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
|
|
||||||
|
|
||||||
class UploadRecord:
|
class UploadRecord:
|
||||||
"""Information about an upload.
|
"""Information about an upload.
|
||||||
|
|
||||||
@ -65,16 +66,19 @@ class UploadRecord:
|
|||||||
data = json.loads(data.decode('utf8'))
|
data = json.loads(data.decode('utf8'))
|
||||||
self.chunks = data['chunks']
|
self.chunks = data['chunks']
|
||||||
hash_state = data['hash_state']
|
hash_state = data['hash_state']
|
||||||
hash_state['md_data'] = base64.decodebytes(hash_state['md_data'].encode('ascii'))
|
hash_state['md_data'] = base64.decodebytes(
|
||||||
|
hash_state['md_data'].encode('ascii'))
|
||||||
self.hasher.__setstate__(hash_state)
|
self.hasher.__setstate__(hash_state)
|
||||||
|
|
||||||
def dump(self):
|
def dump(self):
|
||||||
hash_state = self.hasher.__getstate__()
|
hash_state = self.hasher.__getstate__()
|
||||||
hash_state['md_data'] = base64.encodebytes(hash_state['md_data']).decode('ascii')
|
hash_state['md_data'] = base64.encodebytes(
|
||||||
data = dict(chunks = self.chunks,
|
hash_state['md_data']).decode('ascii')
|
||||||
hash_state = hash_state)
|
data = dict(chunks=self.chunks,
|
||||||
|
hash_state=hash_state)
|
||||||
return json.dumps(data).encode('utf8')
|
return json.dumps(data).encode('utf8')
|
||||||
|
|
||||||
|
|
||||||
class UploadStreamer:
|
class UploadStreamer:
|
||||||
"""Stream an upload to the object storage.
|
"""Stream an upload to the object storage.
|
||||||
|
|
||||||
@ -96,6 +100,7 @@ class UploadStreamer:
|
|||||||
break
|
break
|
||||||
yield d
|
yield d
|
||||||
|
|
||||||
|
|
||||||
class Storage:
|
class Storage:
|
||||||
"""Storage abstraction layer.
|
"""Storage abstraction layer.
|
||||||
|
|
||||||
@ -145,7 +150,6 @@ class Storage:
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
uuid = uuid4().hex
|
uuid = uuid4().hex
|
||||||
path = os.path.join(namespace, 'uploads', uuid, 'metadata')
|
|
||||||
upload = UploadRecord()
|
upload = UploadRecord()
|
||||||
self._update_upload(namespace, uuid, upload)
|
self._update_upload(namespace, uuid, upload)
|
||||||
return uuid
|
return uuid
|
||||||
@ -198,7 +202,7 @@ class Storage:
|
|||||||
t.join()
|
t.join()
|
||||||
upload.chunks.append(dict(size=size))
|
upload.chunks.append(dict(size=size))
|
||||||
self._update_upload(namespace, uuid, upload)
|
self._update_upload(namespace, uuid, upload)
|
||||||
return upload.size-size, upload.size
|
return upload.size - size, upload.size
|
||||||
|
|
||||||
def store_upload(self, namespace, uuid, digest):
|
def store_upload(self, namespace, uuid, digest):
|
||||||
"""Complete an upload.
|
"""Complete an upload.
|
||||||
@ -215,7 +219,7 @@ class Storage:
|
|||||||
# Move the chunks into the blob dir to get them out of the
|
# Move the chunks into the blob dir to get them out of the
|
||||||
# uploads dir.
|
# uploads dir.
|
||||||
chunks = []
|
chunks = []
|
||||||
for i in range(1, upload.count+1):
|
for i in range(1, upload.count + 1):
|
||||||
src_path = os.path.join(namespace, 'uploads', uuid, str(i))
|
src_path = os.path.join(namespace, 'uploads', uuid, str(i))
|
||||||
dst_path = os.path.join(namespace, 'blobs', digest, str(i))
|
dst_path = os.path.join(namespace, 'blobs', digest, str(i))
|
||||||
chunks.append(dst_path)
|
chunks.append(dst_path)
|
||||||
@ -271,7 +275,8 @@ class Storage:
|
|||||||
self.log.debug('Get layers %s', path)
|
self.log.debug('Get layers %s', path)
|
||||||
data = self.backend.get_object(path)
|
data = self.backend.get_object(path)
|
||||||
manifest = json.loads(data)
|
manifest = json.loads(data)
|
||||||
target = manifest.get('application/vnd.docker.distribution.manifest.v2+json')
|
target = manifest.get(
|
||||||
|
'application/vnd.docker.distribution.manifest.v2+json')
|
||||||
layers = []
|
layers = []
|
||||||
if not target:
|
if not target:
|
||||||
self.log.debug('Unknown manifest %s', path)
|
self.log.debug('Unknown manifest %s', path)
|
||||||
|
@ -15,6 +15,7 @@
|
|||||||
|
|
||||||
from abc import ABCMeta, abstractmethod
|
from abc import ABCMeta, abstractmethod
|
||||||
|
|
||||||
|
|
||||||
class ObjectInfo:
|
class ObjectInfo:
|
||||||
def __init__(self, path, name, ctime, isdir):
|
def __init__(self, path, name, ctime, isdir):
|
||||||
self.path = path
|
self.path = path
|
||||||
@ -22,6 +23,7 @@ class ObjectInfo:
|
|||||||
self.ctime = ctime
|
self.ctime = ctime
|
||||||
self.isdir = isdir
|
self.isdir = isdir
|
||||||
|
|
||||||
|
|
||||||
class StorageDriver(metaclass=ABCMeta):
|
class StorageDriver(metaclass=ABCMeta):
|
||||||
"""Base class for storage drivers.
|
"""Base class for storage drivers.
|
||||||
|
|
||||||
|
@ -27,6 +27,7 @@ from . import storageutils
|
|||||||
|
|
||||||
POST_ATTEMPTS = 3
|
POST_ATTEMPTS = 3
|
||||||
|
|
||||||
|
|
||||||
def retry_function(func):
|
def retry_function(func):
|
||||||
for attempt in range(1, POST_ATTEMPTS + 1):
|
for attempt in range(1, POST_ATTEMPTS + 1):
|
||||||
try:
|
try:
|
||||||
@ -40,6 +41,7 @@ def retry_function(func):
|
|||||||
logging.exception("Error on attempt %d" % attempt)
|
logging.exception("Error on attempt %d" % attempt)
|
||||||
time.sleep(attempt * 10)
|
time.sleep(attempt * 10)
|
||||||
|
|
||||||
|
|
||||||
class SwiftDriver(storageutils.StorageDriver):
|
class SwiftDriver(storageutils.StorageDriver):
|
||||||
log = logging.getLogger('registry.swift')
|
log = logging.getLogger('registry.swift')
|
||||||
|
|
||||||
@ -76,7 +78,8 @@ class SwiftDriver(storageutils.StorageDriver):
|
|||||||
else:
|
else:
|
||||||
objpath = obj['name']
|
objpath = obj['name']
|
||||||
name = obj['name'].split('/')[-1]
|
name = obj['name'].split('/')[-1]
|
||||||
ctime = dateutil.parser.parse(obj['last_modified']+'Z').timestamp()
|
ctime = dateutil.parser.parse(
|
||||||
|
obj['last_modified'] + 'Z').timestamp()
|
||||||
isdir = False
|
isdir = False
|
||||||
ret.append(storageutils.ObjectInfo(
|
ret.append(storageutils.ObjectInfo(
|
||||||
objpath, name, ctime, isdir))
|
objpath, name, ctime, isdir))
|
||||||
@ -126,7 +129,7 @@ class SwiftDriver(storageutils.StorageDriver):
|
|||||||
dst = os.path.join(self.container_name, dst_path)
|
dst = os.path.join(self.container_name, dst_path)
|
||||||
retry_function(
|
retry_function(
|
||||||
lambda: self.conn.session.request(
|
lambda: self.conn.session.request(
|
||||||
self.get_url(src_path)+"?multipart-manfest=get",
|
self.get_url(src_path) + "?multipart-manfest=get",
|
||||||
'COPY',
|
'COPY',
|
||||||
headers={'Destination': dst}
|
headers={'Destination': dst}
|
||||||
))
|
))
|
||||||
@ -136,7 +139,7 @@ class SwiftDriver(storageutils.StorageDriver):
|
|||||||
|
|
||||||
def cat_objects(self, path, chunks):
|
def cat_objects(self, path, chunks):
|
||||||
manifest = []
|
manifest = []
|
||||||
#TODO: Would it be better to move 1-chunk objects?
|
# TODO: Would it be better to move 1-chunk objects?
|
||||||
for chunk_path in chunks:
|
for chunk_path in chunks:
|
||||||
ret = retry_function(
|
ret = retry_function(
|
||||||
lambda: self.conn.session.head(self.get_url(chunk_path)))
|
lambda: self.conn.session.head(self.get_url(chunk_path)))
|
||||||
@ -148,7 +151,8 @@ class SwiftDriver(storageutils.StorageDriver):
|
|||||||
'size_bytes': ret.headers['Content-Length']})
|
'size_bytes': ret.headers['Content-Length']})
|
||||||
retry_function(lambda:
|
retry_function(lambda:
|
||||||
self.conn.session.put(
|
self.conn.session.put(
|
||||||
self.get_url(path)+"?multipart-manifest=put",
|
self.get_url(path) + "?multipart-manifest=put",
|
||||||
data=json.dumps(manifest)))
|
data=json.dumps(manifest)))
|
||||||
|
|
||||||
|
|
||||||
Driver = SwiftDriver
|
Driver = SwiftDriver
|
||||||
|
Loading…
Reference in New Issue
Block a user