Merge "Import swift3 into swift repo as s3api middleware"
This commit is contained in:
commit
3313392462
14
.zuul.yaml
14
.zuul.yaml
@ -105,6 +105,18 @@
|
||||
vars:
|
||||
tox_envlist: func-domain-remap-staticweb
|
||||
|
||||
- job:
|
||||
name: swift-tox-func-s3api
|
||||
parent: swift-tox-base
|
||||
description: |
|
||||
Run functional tests for swift under cPython version 2.7.
|
||||
|
||||
Uses tox with the ``func-s3api`` environment.
|
||||
It sets TMPDIR to an XFS mount point created via
|
||||
tools/test-setup.sh.
|
||||
vars:
|
||||
tox_envlist: func-s3api
|
||||
|
||||
- job:
|
||||
name: swift-probetests-centos-7
|
||||
parent: unittests
|
||||
@ -128,6 +140,7 @@
|
||||
- swift-tox-func-encryption
|
||||
- swift-tox-func-domain-remap-staticweb
|
||||
- swift-tox-func-ec
|
||||
- swift-tox-func-s3api
|
||||
- swift-probetests-centos-7
|
||||
gate:
|
||||
jobs:
|
||||
@ -137,6 +150,7 @@
|
||||
- swift-tox-func-encryption
|
||||
- swift-tox-func-domain-remap-staticweb
|
||||
- swift-tox-func-ec
|
||||
- swift-tox-func-s3api
|
||||
experimental:
|
||||
jobs:
|
||||
- swift-tox-py27-centos-7
|
||||
|
7
AUTHORS
7
AUTHORS
@ -72,6 +72,7 @@ Brian Ober (bober@us.ibm.com)
|
||||
Brian Reitz (brian.reitz@oracle.com)
|
||||
Bryan Keller (kellerbr@us.ibm.com)
|
||||
Béla Vancsics (vancsics@inf.u-szeged.hu)
|
||||
Виль Суркин (vills@vills-pro.local)
|
||||
Caleb Tennis (caleb.tennis@gmail.com)
|
||||
Cao Xuan Hoang (hoangcx@vn.fujitsu.com)
|
||||
Carlos Cavanna (ccavanna@ca.ibm.com)
|
||||
@ -111,6 +112,7 @@ Dan Prince (dprince@redhat.com)
|
||||
dangming (dangming@unitedstack.com)
|
||||
Daniele Valeriani (daniele@dvaleriani.net)
|
||||
Darrell Bishop (darrell@swiftstack.com)
|
||||
Darryl Tam (dtam@swiftstack.com)
|
||||
David Goetz (david.goetz@rackspace.com)
|
||||
David Hadas (davidh@il.ibm.com)
|
||||
David Liu (david.liu@cn.ibm.com)
|
||||
@ -253,6 +255,7 @@ Martin Geisler (martin@geisler.net)
|
||||
Martin Kletzander (mkletzan@redhat.com)
|
||||
Maru Newby (mnewby@internap.com)
|
||||
Mathias Bjoerkqvist (mbj@zurich.ibm.com)
|
||||
Masaki Tsukuda (tsukuda.masaki@po.ntts.co.jp)
|
||||
Matt Kassawara (mkassawara@gmail.com)
|
||||
Matt Riedemann (mriedem@us.ibm.com)
|
||||
Matthew Oliver (matt@oliver.net.au)
|
||||
@ -274,6 +277,8 @@ Nakagawa Masaaki (nakagawamsa@nttdata.co.jp)
|
||||
Nakul Dahiwade (nakul.dahiwade@intel.com)
|
||||
Nam Nguyen Hoai (namnh@vn.fujitsu.com)
|
||||
Nandini Tata (nandini.tata@intel.com)
|
||||
Naoto Nishizono (nishizono.naoto@po.ntts.co.jp)
|
||||
Nassim Babaci (nassim.babaci@cloudwatt.com)
|
||||
Nathan Kinder (nkinder@redhat.com)
|
||||
Nelson Almeida (nelsonmarcos@gmail.com)
|
||||
Newptone (xingchao@unitedstack.com)
|
||||
@ -365,11 +370,13 @@ Victor Lowther (victor.lowther@gmail.com)
|
||||
Victor Rodionov (victor.rodionov@nexenta.com)
|
||||
Victor Stinner (vstinner@redhat.com)
|
||||
Viktor Varga (vvarga@inf.u-szeged.hu)
|
||||
Vil Surkin (mail@vills.me)
|
||||
Vincent Untz (vuntz@suse.com)
|
||||
Vladimir Vechkanov (vvechkanov@mirantis.com)
|
||||
Vu Cong Tuan (tuanvc@vn.fujitsu.com)
|
||||
vxlinux (yan.wei7@zte.com.cn)
|
||||
wanghongtaozz (wanghongtaozz@inspur.com)
|
||||
Wyllys Ingersoll (wyllys.ingersoll@evault.com)
|
||||
Wu Wenxiang (wu.wenxiang@99cloud.net)
|
||||
xhancar (pavel.hancar@gmail.com)
|
||||
XieYingYun (smokony@sina.com)
|
||||
|
@ -10,6 +10,10 @@ liberasurecode-dev [platform:dpkg]
|
||||
liberasurecode-devel [platform:rpm !platform:centos]
|
||||
libffi-dev [platform:dpkg]
|
||||
libffi-devel [platform:rpm]
|
||||
libxml2-dev [platform:dpkg]
|
||||
libxml2-devel [platform:rpm]
|
||||
libxslt-devel [platform:rpm]
|
||||
libxslt1-dev [platform:dpkg]
|
||||
memcached
|
||||
python-dev [platform:dpkg]
|
||||
python-devel [platform:rpm]
|
||||
|
209
doc/s3api/conf/ceph-known-failures-keystone.yaml
Normal file
209
doc/s3api/conf/ceph-known-failures-keystone.yaml
Normal file
@ -0,0 +1,209 @@
|
||||
ceph_s3:
|
||||
<nose.suite.ContextSuite context=s3tests.functional>:teardown: {status: KNOWN}
|
||||
<nose.suite.ContextSuite context=test_routing_generator>:setup: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_bucket_create_bad_authorization_invalid_aws2: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_bucket_create_bad_authorization_none: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_object_create_bad_authorization_invalid_aws2: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_object_create_bad_authorization_none: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_100_continue: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_atomic_conditional_write_1mb: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_atomic_dual_conditional_write_1mb: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_default: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_email: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_email_notexist: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_nonexist_user: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_userid_fullcontrol: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_userid_read: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_userid_readacp: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_userid_write: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_userid_writeacp: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_no_grants: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acls_changes_persistent: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_xml_fullcontrol: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_xml_read: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_xml_readacp: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_xml_write: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_xml_writeacp: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_create_exists: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_header_acl_grants: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_list_objects_anonymous: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_list_objects_anonymous_fail: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_recreate_not_overriding: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_cors_origin_response: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_cors_origin_wildcard: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_list_buckets_anonymous: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_list_buckets_invalid_auth: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_logging_toggle: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_multipart_resend_first_finishes_last: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_acl_full_control_verify_owner: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_acl_xml: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_acl_xml_read: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_acl_xml_readacp: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_acl_xml_write: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_acl_xml_writeacp: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_canned_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_not_owned_object_bucket: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_replacing_metadata: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_giveaway: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_header_acl_grants: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_bucket_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_bucket_gone: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_object_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_object_gone: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_put: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_put_write_access: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_set_valid_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_anonymous_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_authenticated_request_bad_access_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_case_insensitive_condition_fields: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_condition_is_case_sensitive: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_escaped_field_values: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_expired_policy: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_expires_is_case_sensitive: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_ignored_header: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_access_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_content_length_argument: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_date_format: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_request_field_value: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_signature: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_conditions_list: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_content_length_argument: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_expires_condition: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_policy_condition: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_signature: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_no_key_specified: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_request_missing_policy_specified_field: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_set_invalid_success_code: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_set_key_from_filename: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_set_success_code: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_success_redirect_action: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_upload_larger_than_chunk: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_upload_size_below_minimum: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_upload_size_limit_exceeded: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_user_specified_header: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_nonexisted_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_overwrite_existed_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_nonexisted_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_overwrite_existed_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_set_cors: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_stress_bucket_acls_changes: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_concurrent_object_create_and_remove: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_concurrent_object_create_concurrent_remove: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_object_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_bucket_create_suspend: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_copy_obj_version: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_multi_object_delete: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_multi_object_delete_with_marker: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_multi_object_delete_with_marker_create: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_overwrite_multipart: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_read_remove: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_read_remove_head: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_versions_remove_all: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_versions_remove_special_names: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_list_marker: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_plain_null_version_overwrite: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_plain_null_version_overwrite_suspended: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_plain_null_version_removal: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_suspend_versions: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_suspend_versions_simple: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.check_can_test_website: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_bucket_private_redirectall_base: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_bucket_private_redirectall_path: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_bucket_private_redirectall_path_upgrade: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_nonexistant_bucket_rgw: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_nonexistant_bucket_s3: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_public_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_public_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_nonwebsite: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_private_abs: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_private_relative: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_public_abs: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_public_relative: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_configure_recreate: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_list_return_data_versioning: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_another_bucket: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_different_tenant: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_set_condition_operator_end_with_IfExists: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_delete_tags_obj_public: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_invalid_md5: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_method_head: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_multipart_bad_download: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_multipart_invalid_chunks_1: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_multipart_invalid_chunks_2: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_no_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_no_md5: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_other_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_post_object_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_present: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_get_obj_head_tagging: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_get_obj_tagging: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_get_tags_acl_public: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_deletemarker_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_expiration_date: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_get: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_get_no_id: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_id_too_long: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_multipart_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_noncur_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_rules_conflicted: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_same_id: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_date: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_deletemarker: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_empty_filter: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_filter: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_multipart: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_noncurrent: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_multipart_copy_invalid_range: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_multipart_copy_versioned: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_versioned_bucket: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_versioning_multipart_upload: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_empty_conditions: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_tags_anonymous_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_tags_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_delete_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_excess_key_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_excess_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_excess_val_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_max_kvsize_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_max_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_modify_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_obj_with_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_tags_acl_public: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_method_head: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_multipart_invalid_chunks_1: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_multipart_invalid_chunks_2: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_multipart_upload: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_post_object_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_present: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_read_declare: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_13b: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_1MB: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_1b: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_1kb: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_object_acl_no_version_specified: {status: KNOWN}
|
187
doc/s3api/conf/ceph-known-failures-tempauth.yaml
Normal file
187
doc/s3api/conf/ceph-known-failures-tempauth.yaml
Normal file
@ -0,0 +1,187 @@
|
||||
ceph_s3:
|
||||
<nose.suite.ContextSuite context=s3tests.functional>:teardown: {status: KNOWN}
|
||||
<nose.suite.ContextSuite context=test_routing_generator>:setup: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_bucket_create_bad_authorization_invalid_aws2: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_bucket_create_bad_authorization_none: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_object_create_bad_authorization_invalid_aws2: {status: KNOWN}
|
||||
s3tests.functional.test_headers.test_object_create_bad_authorization_none: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_100_continue: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_atomic_conditional_write_1mb: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_atomic_dual_conditional_write_1mb: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_email: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_email_notexist: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_grant_nonexist_user: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_acl_no_grants: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_create_exists: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_header_acl_grants: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_list_objects_anonymous: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_list_objects_anonymous_fail: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_recreate_not_overriding: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_cors_origin_response: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_cors_origin_wildcard: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_list_buckets_anonymous: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_list_buckets_invalid_auth: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_logging_toggle: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_multipart_resend_first_finishes_last: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_canned_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_replacing_metadata: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_header_acl_grants: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_bucket_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_bucket_gone: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_object_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_get_object_gone: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_put: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_raw_put_write_access: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_anonymous_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_authenticated_request_bad_access_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_case_insensitive_condition_fields: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_condition_is_case_sensitive: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_escaped_field_values: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_expired_policy: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_expires_is_case_sensitive: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_ignored_header: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_access_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_content_length_argument: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_date_format: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_request_field_value: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_invalid_signature: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_conditions_list: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_content_length_argument: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_expires_condition: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_policy_condition: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_missing_signature: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_no_key_specified: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_request_missing_policy_specified_field: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_set_invalid_success_code: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_set_key_from_filename: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_set_success_code: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_success_redirect_action: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_upload_larger_than_chunk: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_upload_size_below_minimum: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_upload_size_limit_exceeded: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_user_specified_header: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_nonexisted_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifmatch_overwrite_existed_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_nonexisted_good: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_object_ifnonmatch_overwrite_existed_failed: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_set_cors: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_concurrent_object_create_and_remove: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_concurrent_object_create_concurrent_remove: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_object_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_bucket_create_suspend: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_copy_obj_version: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_multi_object_delete: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_multi_object_delete_with_marker: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_multi_object_delete_with_marker_create: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_overwrite_multipart: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_read_remove: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_read_remove_head: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_versions_remove_all: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_create_versions_remove_special_names: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_list_marker: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_plain_null_version_overwrite: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_plain_null_version_overwrite_suspended: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_plain_null_version_removal: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_suspend_versions: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioning_obj_suspend_versions_simple: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.check_can_test_website: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_bucket_private_redirectall_base: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_bucket_private_redirectall_path: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_bucket_private_redirectall_path_upgrade: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_nonexistant_bucket_rgw: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_nonexistant_bucket_s3: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_empty_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_private_index_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_private_bucket_list_public_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_empty_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index_blockederrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index_gooderrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_private_index_missingerrordoc: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_public_bucket_list_public_index: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_nonwebsite: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_private_abs: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_private_relative: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_public_abs: {status: KNOWN}
|
||||
s3tests.functional.test_s3_website.test_website_xredirect_public_relative: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_configure_recreate: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_list_return_data_versioning: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_acl: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_another_bucket: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_different_tenant: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_bucket_policy_set_condition_operator_end_with_IfExists: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_delete_tags_obj_public: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_invalid_md5: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_method_head: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_multipart_bad_download: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_multipart_invalid_chunks_1: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_multipart_invalid_chunks_2: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_no_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_no_md5: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_other_key: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_post_object_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_encryption_sse_c_present: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_get_obj_head_tagging: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_get_obj_tagging: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_get_tags_acl_public: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_deletemarker_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_expiration_date: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_get: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_get_no_id: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_id_too_long: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_multipart_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_noncur_expiration: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_rules_conflicted: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_same_id: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_date: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_deletemarker: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_empty_filter: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_filter: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_multipart: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_lifecycle_set_noncurrent: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_multipart_copy_invalid_range: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_multipart_copy_versioned: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_versioned_bucket: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_object_copy_versioning_multipart_upload: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_empty_conditions: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_tags_anonymous_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_post_object_tags_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_delete_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_excess_key_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_excess_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_excess_val_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_max_kvsize_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_max_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_modify_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_obj_with_tags: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_put_tags_acl_public: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_method_head: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_multipart_invalid_chunks_1: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_multipart_invalid_chunks_2: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_multipart_upload: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_post_object_authenticated_request: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_present: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_read_declare: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_13b: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_1MB: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_1b: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_sse_kms_transfer_1kb: {status: KNOWN}
|
||||
s3tests.functional.test_s3.test_versioned_object_acl_no_version_specified: {status: KNOWN}
|
18
doc/s3api/conf/ceph-s3.conf.in
Normal file
18
doc/s3api/conf/ceph-s3.conf.in
Normal file
@ -0,0 +1,18 @@
|
||||
[DEFAULT]
|
||||
host = localhost
|
||||
port = 8080
|
||||
is_secure = no
|
||||
|
||||
[s3 main]
|
||||
user_id = %ADMIN_ACCESS_KEY%
|
||||
display_name = %ADMIN_ACCESS_KEY%
|
||||
email = %ADMIN_ACCESS_KEY%
|
||||
access_key = %ADMIN_ACCESS_KEY%
|
||||
secret_key = %ADMIN_SECRET_KEY%
|
||||
|
||||
[s3 alt]
|
||||
user_id = %TESTER_ACCESS_KEY%
|
||||
display_name = %TESTER_ACCESS_KEY%
|
||||
email = %TESTER_ACCESS_KEY%
|
||||
access_key = %TESTER_ACCESS_KEY%
|
||||
secret_key = %TESTER_SECRET_KEY%
|
17
doc/s3api/conf/object-server.conf.in
Normal file
17
doc/s3api/conf/object-server.conf.in
Normal file
@ -0,0 +1,17 @@
|
||||
[DEFAULT]
|
||||
user = %USER%
|
||||
bind_port = 6000
|
||||
swift_dir = %TEST_DIR%/etc
|
||||
devices = %TEST_DIR%
|
||||
mount_check = false
|
||||
workers = 1
|
||||
log_level = DEBUG
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = object-server
|
||||
|
||||
[app:object-server]
|
||||
use = egg:swift#object
|
||||
allowed_headers = Cache-Control, Content-Disposition, Content-Encoding,
|
||||
Content-Language, Expires, X-Delete-At, X-Object-Manifest, X-Robots-Tag,
|
||||
X-Static-Large-Object
|
7
doc/s3api/rnc/access_control_policy.rnc
Normal file
7
doc/s3api/rnc/access_control_policy.rnc
Normal file
@ -0,0 +1,7 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element AccessControlPolicy {
|
||||
element Owner { CanonicalUser } &
|
||||
element AccessControlList { AccessControlList }
|
||||
}
|
10
doc/s3api/rnc/bucket_logging_status.rnc
Normal file
10
doc/s3api/rnc/bucket_logging_status.rnc
Normal file
@ -0,0 +1,10 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element BucketLoggingStatus {
|
||||
element LoggingEnabled {
|
||||
element TargetBucket { xsd:string } &
|
||||
element TargetPrefix { xsd:string } &
|
||||
element TargetGrants { AccessControlList }?
|
||||
}?
|
||||
}
|
26
doc/s3api/rnc/common.rnc
Normal file
26
doc/s3api/rnc/common.rnc
Normal file
@ -0,0 +1,26 @@
|
||||
namespace xsi = "http://www.w3.org/2001/XMLSchema-instance"
|
||||
|
||||
CanonicalUser =
|
||||
element ID { xsd:string } &
|
||||
element DisplayName { xsd:string }?
|
||||
|
||||
StorageClass = "STANDARD" | "REDUCED_REDUNDANCY" | "GLACIER" | "UNKNOWN"
|
||||
|
||||
AccessControlList =
|
||||
element Grant {
|
||||
element Grantee {
|
||||
(
|
||||
attribute xsi:type { "AmazonCustomerByEmail" },
|
||||
element EmailAddress { xsd:string }
|
||||
) | (
|
||||
attribute xsi:type { "CanonicalUser" },
|
||||
CanonicalUser
|
||||
) | (
|
||||
attribute xsi:type { "Group" },
|
||||
element URI { xsd:string }
|
||||
)
|
||||
} &
|
||||
element Permission {
|
||||
"READ" | "WRITE" | "READ_ACP" | "WRITE_ACP" | "FULL_CONTROL"
|
||||
}
|
||||
}*
|
7
doc/s3api/rnc/complete_multipart_upload.rnc
Normal file
7
doc/s3api/rnc/complete_multipart_upload.rnc
Normal file
@ -0,0 +1,7 @@
|
||||
start =
|
||||
element CompleteMultipartUpload {
|
||||
element Part {
|
||||
element PartNumber { xsd:int } &
|
||||
element ETag { xsd:string }
|
||||
}+
|
||||
}
|
7
doc/s3api/rnc/complete_multipart_upload_result.rnc
Normal file
7
doc/s3api/rnc/complete_multipart_upload_result.rnc
Normal file
@ -0,0 +1,7 @@
|
||||
start =
|
||||
element CompleteMultipartUploadResult {
|
||||
element Location { xsd:anyURI },
|
||||
element Bucket { xsd:string },
|
||||
element Key { xsd:string },
|
||||
element ETag { xsd:string }
|
||||
}
|
5
doc/s3api/rnc/copy_object_result.rnc
Normal file
5
doc/s3api/rnc/copy_object_result.rnc
Normal file
@ -0,0 +1,5 @@
|
||||
start =
|
||||
element CopyObjectResult {
|
||||
element LastModified { xsd:dateTime },
|
||||
element ETag { xsd:string }
|
||||
}
|
5
doc/s3api/rnc/copy_part_result.rnc
Normal file
5
doc/s3api/rnc/copy_part_result.rnc
Normal file
@ -0,0 +1,5 @@
|
||||
start =
|
||||
element CopyPartResult {
|
||||
element LastModified { xsd:dateTime },
|
||||
element ETag { xsd:string }
|
||||
}
|
4
doc/s3api/rnc/create_bucket_configuration.rnc
Normal file
4
doc/s3api/rnc/create_bucket_configuration.rnc
Normal file
@ -0,0 +1,4 @@
|
||||
start =
|
||||
element * {
|
||||
element LocationConstraint { xsd:string }
|
||||
}
|
8
doc/s3api/rnc/delete.rnc
Normal file
8
doc/s3api/rnc/delete.rnc
Normal file
@ -0,0 +1,8 @@
|
||||
start =
|
||||
element Delete {
|
||||
element Quiet { xsd:boolean }? &
|
||||
element Object {
|
||||
element Key { xsd:string } &
|
||||
element VersionId { xsd:string }?
|
||||
}+
|
||||
}
|
17
doc/s3api/rnc/delete_result.rnc
Normal file
17
doc/s3api/rnc/delete_result.rnc
Normal file
@ -0,0 +1,17 @@
|
||||
start =
|
||||
element DeleteResult {
|
||||
(
|
||||
element Deleted {
|
||||
element Key { xsd:string },
|
||||
element VersionId { xsd:string }?,
|
||||
element DeleteMarker { xsd:boolean }?,
|
||||
element DeleteMarkerVersionId { xsd:string }?
|
||||
} |
|
||||
element Error {
|
||||
element Key { xsd:string },
|
||||
element VersionId { xsd:string }?,
|
||||
element Code { xsd:string },
|
||||
element Message { xsd:string }
|
||||
}
|
||||
)*
|
||||
}
|
11
doc/s3api/rnc/error.rnc
Normal file
11
doc/s3api/rnc/error.rnc
Normal file
@ -0,0 +1,11 @@
|
||||
start =
|
||||
element Error {
|
||||
element Code { xsd:string },
|
||||
element Message { xsd:string },
|
||||
DebugInfo*
|
||||
}
|
||||
|
||||
DebugInfo =
|
||||
element * {
|
||||
(attribute * { text } | text | DebugInfo)*
|
||||
}
|
6
doc/s3api/rnc/initiate_multipart_upload_result.rnc
Normal file
6
doc/s3api/rnc/initiate_multipart_upload_result.rnc
Normal file
@ -0,0 +1,6 @@
|
||||
start =
|
||||
element InitiateMultipartUploadResult {
|
||||
element Bucket { xsd:string },
|
||||
element Key { xsd:string },
|
||||
element UploadId { xsd:string }
|
||||
}
|
20
doc/s3api/rnc/lifecycle_configuration.rnc
Normal file
20
doc/s3api/rnc/lifecycle_configuration.rnc
Normal file
@ -0,0 +1,20 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element LifecycleConfiguration {
|
||||
element Rule {
|
||||
element ID { xsd:string }? &
|
||||
element Prefix { xsd:string } &
|
||||
element Status { "Enabled" | "Disabled" } &
|
||||
element Transition { Transition }? &
|
||||
element Expiration { Expiration }?
|
||||
}+
|
||||
}
|
||||
|
||||
Expiration =
|
||||
element Days { xsd:int } |
|
||||
element Date { xsd:dateTime }
|
||||
|
||||
Transition =
|
||||
Expiration &
|
||||
element StorageClass { StorageClass }
|
12
doc/s3api/rnc/list_all_my_buckets_result.rnc
Normal file
12
doc/s3api/rnc/list_all_my_buckets_result.rnc
Normal file
@ -0,0 +1,12 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element ListAllMyBucketsResult {
|
||||
element Owner { CanonicalUser },
|
||||
element Buckets {
|
||||
element Bucket {
|
||||
element Name { xsd:string },
|
||||
element CreationDate { xsd:dateTime }
|
||||
}*
|
||||
}
|
||||
}
|
33
doc/s3api/rnc/list_bucket_result.rnc
Normal file
33
doc/s3api/rnc/list_bucket_result.rnc
Normal file
@ -0,0 +1,33 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element ListBucketResult {
|
||||
element Name { xsd:string },
|
||||
element Prefix { xsd:string },
|
||||
(
|
||||
(
|
||||
element Marker { xsd:string },
|
||||
element NextMarker { xsd:string }?
|
||||
) | (
|
||||
element NextContinuationToken { xsd:string }?,
|
||||
element ContinuationToken { xsd:string }?,
|
||||
element StartAfter { xsd:string }?,
|
||||
element KeyCount { xsd:int }
|
||||
)
|
||||
),
|
||||
element MaxKeys { xsd:int },
|
||||
element EncodingType { xsd:string }?,
|
||||
element Delimiter { xsd:string }?,
|
||||
element IsTruncated { xsd:boolean },
|
||||
element Contents {
|
||||
element Key { xsd:string },
|
||||
element LastModified { xsd:dateTime },
|
||||
element ETag { xsd:string },
|
||||
element Size { xsd:long },
|
||||
element Owner { CanonicalUser }?,
|
||||
element StorageClass { StorageClass }
|
||||
}*,
|
||||
element CommonPrefixes {
|
||||
element Prefix { xsd:string }
|
||||
}*
|
||||
}
|
26
doc/s3api/rnc/list_multipart_uploads_result.rnc
Normal file
26
doc/s3api/rnc/list_multipart_uploads_result.rnc
Normal file
@ -0,0 +1,26 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element ListMultipartUploadsResult {
|
||||
element Bucket { xsd:string },
|
||||
element KeyMarker { xsd:string },
|
||||
element UploadIdMarker { xsd:string },
|
||||
element NextKeyMarker { xsd:string },
|
||||
element NextUploadIdMarker { xsd:string },
|
||||
element Delimiter { xsd:string }?,
|
||||
element Prefix { xsd:string }?,
|
||||
element MaxUploads { xsd:int },
|
||||
element EncodingType { xsd:string }?,
|
||||
element IsTruncated { xsd:boolean },
|
||||
element Upload {
|
||||
element Key { xsd:string },
|
||||
element UploadId { xsd:string },
|
||||
element Initiator { CanonicalUser },
|
||||
element Owner { CanonicalUser },
|
||||
element StorageClass { StorageClass },
|
||||
element Initiated { xsd:dateTime }
|
||||
}*,
|
||||
element CommonPrefixes {
|
||||
element Prefix { xsd:string }
|
||||
}*
|
||||
}
|
22
doc/s3api/rnc/list_parts_result.rnc
Normal file
22
doc/s3api/rnc/list_parts_result.rnc
Normal file
@ -0,0 +1,22 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element ListPartsResult {
|
||||
element Bucket { xsd:string },
|
||||
element Key { xsd:string },
|
||||
element UploadId { xsd:string },
|
||||
element Initiator { CanonicalUser },
|
||||
element Owner { CanonicalUser },
|
||||
element StorageClass { StorageClass },
|
||||
element PartNumberMarker { xsd:int },
|
||||
element NextPartNumberMarker { xsd:int },
|
||||
element MaxParts { xsd:int },
|
||||
element EncodingType { xsd:string }?,
|
||||
element IsTruncated { xsd:boolean },
|
||||
element Part {
|
||||
element PartNumber { xsd:int },
|
||||
element LastModified { xsd:dateTime },
|
||||
element ETag { xsd:string },
|
||||
element Size { xsd:long }
|
||||
}*
|
||||
}
|
37
doc/s3api/rnc/list_versions_result.rnc
Normal file
37
doc/s3api/rnc/list_versions_result.rnc
Normal file
@ -0,0 +1,37 @@
|
||||
include "common.rnc"
|
||||
|
||||
start =
|
||||
element ListVersionsResult {
|
||||
element Name { xsd:string },
|
||||
element Prefix { xsd:string },
|
||||
element KeyMarker { xsd:string },
|
||||
element VersionIdMarker { xsd:string },
|
||||
element NextKeyMarker { xsd:string }?,
|
||||
element NextVersionIdMarker { xsd:string }?,
|
||||
element MaxKeys { xsd:int },
|
||||
element EncodingType { xsd:string }?,
|
||||
element Delimiter { xsd:string }?,
|
||||
element IsTruncated { xsd:boolean },
|
||||
(
|
||||
element Version {
|
||||
element Key { xsd:string },
|
||||
element VersionId { xsd:string },
|
||||
element IsLatest { xsd:boolean },
|
||||
element LastModified { xsd:dateTime },
|
||||
element ETag { xsd:string },
|
||||
element Size { xsd:long },
|
||||
element Owner { CanonicalUser }?,
|
||||
element StorageClass { StorageClass }
|
||||
} |
|
||||
element DeleteMarker {
|
||||
element Key { xsd:string },
|
||||
element VersionId { xsd:string },
|
||||
element IsLatest { xsd:boolean },
|
||||
element LastModified { xsd:dateTime },
|
||||
element Owner { CanonicalUser }?
|
||||
}
|
||||
)*,
|
||||
element CommonPrefixes {
|
||||
element Prefix { xsd:string }
|
||||
}*
|
||||
}
|
1
doc/s3api/rnc/location_constraint.rnc
Normal file
1
doc/s3api/rnc/location_constraint.rnc
Normal file
@ -0,0 +1 @@
|
||||
start = element LocationConstraint { xsd:string }
|
5
doc/s3api/rnc/versioning_configuration.rnc
Normal file
5
doc/s3api/rnc/versioning_configuration.rnc
Normal file
@ -0,0 +1,5 @@
|
||||
start =
|
||||
element VersioningConfiguration {
|
||||
element Status { "Enabled" | "Suspended" }? &
|
||||
element MfaDelete { "Enabled" | "Disabled" }?
|
||||
}
|
@ -108,7 +108,6 @@ Alternative API
|
||||
|
||||
* `ProxyFS <https://github.com/swiftstack/ProxyFS>`_ - Integrated file and
|
||||
object access for Swift object storage
|
||||
* `Swift3 <https://github.com/openstack/swift3>`_ - Amazon S3 API emulation.
|
||||
* `SwiftHLM <https://github.com/ibm-research/SwiftHLM>`_ - a middleware for
|
||||
using OpenStack Swift with tape and other high latency media storage
|
||||
backends.
|
||||
@ -177,3 +176,4 @@ Other
|
||||
web browser
|
||||
* `swiftbackmeup <https://github.com/redhat-cip/swiftbackmeup>`_ -
|
||||
Utility that allows one to create backups and upload them to OpenStack Swift
|
||||
* `s3compat <https://github.com/swiftstack/s3compat>`_ - S3 API compatibility checker
|
||||
|
@ -11,6 +11,95 @@ Account Quotas
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. _s3api:
|
||||
|
||||
AWS S3 Api
|
||||
==========
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.s3api
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.s3token
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.s3request
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.s3response
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.exception
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.etree
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.utils
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.subresource
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.acl_handlers
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.acl_utils
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.base
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.service
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.bucket
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.obj
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.acl
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.s3_acl
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.multi_upload
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.multi_delete
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.versioning
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.location
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. automodule:: swift.common.middleware.s3api.controllers.logging
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. _bulk:
|
||||
|
||||
Bulk Operations (Delete and Archive Auto Extraction)
|
||||
|
@ -120,6 +120,8 @@ use = egg:swift#object
|
||||
# Comma separated list of headers that can be set in metadata on an object.
|
||||
# This list is in addition to X-Object-Meta-* headers and cannot include
|
||||
# Content-Type, etag, Content-Length, or deleted
|
||||
# Note that you may add some extra headers for better S3 compatibility, they are:
|
||||
# Cache-Control, Content-Language, Expires, and X-Robots-Tag
|
||||
# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
|
||||
#
|
||||
# auto_create_account_prefix = .
|
||||
|
@ -442,6 +442,145 @@ user_test5_tester5 = testing5 service
|
||||
# in ACLs by setting allow_names_in_acls to false:
|
||||
# allow_names_in_acls = true
|
||||
|
||||
[filter:s3api]
|
||||
use = egg:swift#s3api
|
||||
|
||||
# s3api setup:
|
||||
#
|
||||
# With either tempauth or your custom auth:
|
||||
# - Put s3api just before your auth filter(s) in the pipeline
|
||||
# With keystone:
|
||||
# - Put s3api and s3token before keystoneauth in the pipeline
|
||||
#
|
||||
# Swift has no concept of the S3's resource owner; the resources
|
||||
# (i.e. containers and objects) created via the Swift API have no owner
|
||||
# information. This option specifies how the s3api middleware handles them
|
||||
# with the S3 API. If this option is 'false', such kinds of resources will be
|
||||
# invisible and no users can access them with the S3 API. If set to 'true',
|
||||
# a resource without an owner belongs to everyone and everyone can access it
|
||||
# with the S3 API. If you care about S3 compatibility, set 'false' here. This
|
||||
# option makes sense only when the s3_acl option is set to 'true' and your
|
||||
# Swift cluster has the resources created via the Swift API.
|
||||
# allow_no_owner = false
|
||||
#
|
||||
# Set a region name of your Swift cluster. Note that the s3api doesn't choose
|
||||
# a region of the newly created bucket. This value is used for the
|
||||
# GET Bucket location API and v4 signatures calculation.
|
||||
# location = US
|
||||
#
|
||||
# Set whether to enforce DNS-compliant bucket names. Note that S3 enforces
|
||||
# these conventions in all regions except the US Standard region.
|
||||
# dns_compliant_bucket_names = True
|
||||
#
|
||||
# Set the default maximum number of objects returned in the GET Bucket
|
||||
# response.
|
||||
# max_bucket_listing = 1000
|
||||
#
|
||||
# Set the maximum number of parts returned in the List Parts operation.
|
||||
# (default: 1000 as well as S3 specification)
|
||||
# If setting it larger than 10000 (swift container_listing_limit default)
|
||||
# make sure you also increase the container_listing_limit in swift.conf.
|
||||
# max_parts_listing = 1000
|
||||
#
|
||||
# Set the maximum number of objects we can delete with the Multi-Object Delete
|
||||
# operation.
|
||||
# max_multi_delete_objects = 1000
|
||||
#
|
||||
# If set to 'true', s3api uses its own metadata for ACLs
|
||||
# (e.g. X-Container-Sysmeta-S3Api-Acl) to achieve the best S3 compatibility.
|
||||
# If set to 'false', s3api tries to use Swift ACLs (e.g. X-Container-Read)
|
||||
# instead of S3 ACLs as far as possible.
|
||||
# There are some caveats that one should know about this setting. Firstly,
|
||||
# if set to 'false' after being previously set to 'true' any new objects or
|
||||
# containers stored while 'true' setting will be accessible to all users
|
||||
# because the s3 ACLs will be ignored under s3_acl=False setting. Secondly,
|
||||
# s3_acl True mode don't keep ACL consistency between both the S3 and Swift
|
||||
# API. Meaning with s3_acl enabled S3 ACLs only effect objects and buckets
|
||||
# via the S3 API. As this ACL information wont be available via the Swift API
|
||||
# and so the ACL wont be applied.
|
||||
# Note that s3_acl currently supports only keystone and tempauth.
|
||||
# DON'T USE THIS for production before enough testing for your use cases.
|
||||
# This stuff is still under development and it might cause something
|
||||
# you don't expect.
|
||||
# s3_acl = false
|
||||
#
|
||||
# Specify a host name of your Swift cluster. This enables virtual-hosted style
|
||||
# requests.
|
||||
# storage_domain =
|
||||
#
|
||||
# Enable pipeline order check for SLO, s3token, authtoken, keystoneauth
|
||||
# according to standard s3api/Swift construction using either tempauth or
|
||||
# keystoneauth. If the order is incorrect, it raises an exception to stop
|
||||
# proxy. Turn auth_pipeline_check off only when you want to bypass these
|
||||
# authenticate middlewares in order to use other 3rd party (or your
|
||||
# proprietary) authenticate middleware.
|
||||
# auth_pipeline_check = True
|
||||
#
|
||||
# Enable multi-part uploads. (default: true)
|
||||
# This is required to store files larger than Swift's max_file_size (by
|
||||
# default, 5GiB). Note that has performance implications when deleting objects,
|
||||
# as we now have to check for whether there are also segments to delete.
|
||||
# allow_multipart_uploads = True
|
||||
#
|
||||
# Set the maximum number of parts for Upload Part operation.(default: 1000)
|
||||
# When setting it to be larger than the default value in order to match the
|
||||
# specification of S3, set to be larger max_manifest_segments for slo
|
||||
# middleware.(specification of S3: 10000)
|
||||
# max_upload_part_num = 1000
|
||||
#
|
||||
# Enable returning only buckets which owner are the user who requested
|
||||
# GET Service operation. (default: false)
|
||||
# If you want to enable the above feature, set this and s3_acl to true.
|
||||
# That might cause significant performance degradation. So, only if your
|
||||
# service absolutely need this feature, set this setting to true.
|
||||
# If you set this to false, s3api returns all buckets.
|
||||
# check_bucket_owner = false
|
||||
#
|
||||
# By default, Swift reports only S3 style access log.
|
||||
# (e.g. PUT /bucket/object) If set force_swift_request_proxy_log
|
||||
# to be 'true', Swift will become to output Swift style log
|
||||
# (e.g. PUT /v1/account/container/object) in addition to S3 style log.
|
||||
# Note that they will be reported twice (i.e. s3api doesn't care about
|
||||
# the duplication) and Swift style log will includes also various subrequests
|
||||
# to achieve S3 compatibilities when force_swift_request_proxy_log is set to
|
||||
# 'true'
|
||||
# force_swift_request_proxy_log = false
|
||||
#
|
||||
# AWS S3 document says that each part must be at least 5 MB in a multipart
|
||||
# upload, except the last part.
|
||||
# min_segment_size = 5242880
|
||||
|
||||
# You can override the default log routing for this filter here:
|
||||
# log_name = s3api
|
||||
|
||||
[filter:s3token]
|
||||
# s3token middleware authenticates with keystone using the s3 credentials
|
||||
# provided in the request header. Please put s3token between s3api
|
||||
# and keystoneauth if you're using keystoneauth.
|
||||
use = egg:swift#s3token
|
||||
|
||||
# Prefix that will be prepended to the tenant to form the account
|
||||
reseller_prefix = AUTH_
|
||||
|
||||
# By default, s3token will reject all invalid S3-style requests. Set this to
|
||||
# True to delegate that decision to downstream WSGI components. This may be
|
||||
# useful if there are multiple auth systems in the proxy pipeline.
|
||||
delay_auth_decision = False
|
||||
|
||||
# Keystone server details
|
||||
auth_uri = http://keystonehost:35357/v3
|
||||
|
||||
# Connect/read timeout to use when communicating with Keystone
|
||||
http_timeout = 10.0
|
||||
|
||||
# SSL-related options
|
||||
# insecure = False
|
||||
# certfile =
|
||||
# keyfile =
|
||||
|
||||
# You can override the default log routing for this filter here:
|
||||
# log_name = s3token
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
# An optional filesystem path, which if present, will cause the healthcheck
|
||||
|
@ -7,6 +7,8 @@ eventlet>=0.17.4 # MIT
|
||||
greenlet>=0.3.1
|
||||
netifaces>=0.5,!=0.10.0,!=0.10.1
|
||||
PasteDeploy>=1.3.3
|
||||
lxml
|
||||
requests>=2.14.2 # Apache-2.0
|
||||
six>=1.9.0
|
||||
xattr>=0.4
|
||||
PyECLib>=1.3.1 # BSD
|
||||
|
@ -110,6 +110,8 @@ paste.filter_factory =
|
||||
kms_keymaster = swift.common.middleware.crypto.kms_keymaster:filter_factory
|
||||
listing_formats = swift.common.middleware.listing_formats:filter_factory
|
||||
symlink = swift.common.middleware.symlink:filter_factory
|
||||
s3api = swift.common.middleware.s3api.s3api:filter_factory
|
||||
s3token = swift.common.middleware.s3api.s3token:filter_factory
|
||||
|
||||
|
||||
[egg_info]
|
||||
|
0
swift/common/middleware/s3api/__init__.py
Normal file
0
swift/common/middleware/s3api/__init__.py
Normal file
479
swift/common/middleware/s3api/acl_handlers.py
Normal file
479
swift/common/middleware/s3api/acl_handlers.py
Normal file
@ -0,0 +1,479 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
------------
|
||||
Acl Handlers
|
||||
------------
|
||||
|
||||
Why do we need this
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To make controller classes clean, we need these handlers.
|
||||
It is really useful for customizing acl checking algorithms for
|
||||
each controller.
|
||||
|
||||
Basic Information
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
BaseAclHandler wraps basic Acl handling.
|
||||
(i.e. it will check acl from ACL_MAP by using HEAD)
|
||||
|
||||
How to extend
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
Make a handler with the name of the controller.
|
||||
(e.g. BucketAclHandler is for BucketController)
|
||||
It consists of method(s) for actual S3 method on controllers as follows.
|
||||
|
||||
Example::
|
||||
|
||||
class BucketAclHandler(BaseAclHandler):
|
||||
def PUT:
|
||||
<< put acl handling algorithms here for PUT bucket >>
|
||||
|
||||
.. note::
|
||||
If the method DON'T need to recall _get_response in outside of
|
||||
acl checking, the method have to return the response it needs at
|
||||
the end of method.
|
||||
|
||||
"""
|
||||
import sys
|
||||
|
||||
from swift.common.middleware.s3api.subresource import ACL, Owner, encode_acl
|
||||
from swift.common.middleware.s3api.s3response import MissingSecurityHeader, \
|
||||
MalformedACLError, UnexpectedContent
|
||||
from swift.common.middleware.s3api.etree import fromstring, XMLSyntaxError, \
|
||||
DocumentInvalid
|
||||
from swift.common.middleware.s3api.utils import MULTIUPLOAD_SUFFIX, \
|
||||
sysmeta_header
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
def get_acl_handler(controller_name):
    """
    Return the acl handler class matching *controller_name*.

    Handlers are discovered among the subclasses of BaseAclHandler and
    MultiUploadAclHandler.  A handler named ``<Name>AclHandler`` serves
    ``<Name>Controller``; ``S3AclHandler`` is special-cased so only the
    ``Handler`` suffix is stripped.  Falls back to BaseAclHandler when
    no specific handler exists.
    """
    for base in (BaseAclHandler, MultiUploadAclHandler):
        # pylint: disable-msg=E1101
        for candidate in base.__subclasses__():
            suffix = 'Handler' if candidate.__name__ == 'S3AclHandler' \
                else 'AclHandler'
            if candidate.__name__[:-len(suffix)] == controller_name:
                return candidate
    return BaseAclHandler
||||
|
||||
|
||||
class BaseAclHandler(object):
    """
    BaseAclHandler: Handling ACL for basic requests mapped on ACL_MAP
    """
    def __init__(self, req, logger):
        # Cache the pieces of the request that acl checking needs;
        # request_with() below may temporarily override some of them.
        self.req = req
        self.container = self.req.container_name
        self.obj = self.req.object_name
        self.method = req.environ['REQUEST_METHOD']
        self.user_id = self.req.user_id
        self.headers = self.req.headers
        self.logger = logger

    @contextmanager
    def request_with(self, container=None, obj=None, headers=None):
        """
        Temporarily override container/object/headers used for acl
        checking; the originals are restored on exit.  Note that falsy
        arguments ('' or None) keep the original value.
        """
        try:
            org_cont = self.container
            org_obj = self.obj
            org_headers = self.headers

            self.container = container or org_cont
            self.obj = obj or org_obj
            self.headers = headers or org_headers
            yield

        finally:
            self.container = org_cont
            self.obj = org_obj
            self.headers = org_headers

    def handle_acl(self, app, method, container=None, obj=None, headers=None):
        """
        Dispatch to a verb-specific handler method when one is defined,
        otherwise fall back to the generic ACL_MAP-driven check.
        """
        method = method or self.method

        with self.request_with(container, obj, headers):
            if hasattr(self, method):
                return getattr(self, method)(app)
            else:
                return self._handle_acl(app, method)

    def _handle_acl(self, app, sw_method, container=None, obj=None,
                    permission=None, headers=None):
        """
        General acl handling method.
        This method expects to call Request._get_response() in outside of
        this method so that this method returns response only when sw_method
        is HEAD.
        """

        container = self.container if container is None else container
        obj = self.obj if obj is None else obj
        sw_method = sw_method or self.req.environ['REQUEST_METHOD']
        resource = 'object' if obj else 'container'
        headers = self.headers if headers is None else headers

        self.logger.debug(
            'checking permission: %s %s %s %s' %
            (container, obj, sw_method, dict(headers)))

        if not container:
            return

        if not permission and (self.method, sw_method, resource) in ACL_MAP:
            acl_check = ACL_MAP[(self.method, sw_method, resource)]
            resource = acl_check.get('Resource') or resource
            permission = acl_check['Permission']

        if not permission:
            self.logger.debug(
                '%s %s %s %s' % (container, obj, sw_method, headers))
            raise Exception('No permission to be checked exists')

        if resource == 'object':
            resp = self.req.get_acl_response(app, 'HEAD',
                                             container, obj,
                                             headers)
            acl = resp.object_acl
        elif resource == 'container':
            resp = self.req.get_acl_response(app, 'HEAD',
                                             container, '')
            acl = resp.bucket_acl

        try:
            acl.check_permission(self.user_id, permission)
        except Exception as e:
            self.logger.debug(acl)
            self.logger.debug('permission denied: %s %s %s' %
                              (e, self.user_id, permission))
            raise

        if sw_method == 'HEAD':
            return resp

    def get_acl(self, headers, body, bucket_owner, object_owner=None):
        """
        Get ACL instance from S3 (e.g. x-amz-grant) headers or S3 acl xml body.
        """
        acl = ACL.from_headers(headers, bucket_owner, object_owner,
                               as_private=False)

        if acl is None:
            # Get acl from request body if possible.
            if not body:
                raise MissingSecurityHeader(missing_header_name='x-amz-acl')
            try:
                elem = fromstring(body, ACL.root_tag)
                acl = ACL.from_elem(
                    elem, True, self.req.allow_no_owner)
            except (XMLSyntaxError, DocumentInvalid):
                raise MalformedACLError()
            except Exception as e:
                self.logger.error(e)
                # A bare re-raise preserves the original traceback and,
                # unlike the old three-argument raise statement, works on
                # both Python 2 and Python 3.
                raise
        else:
            if body:
                # Specifying grant with both header and xml is not allowed.
                raise UnexpectedContent()

        return acl
|
||||
|
||||
class BucketAclHandler(BaseAclHandler):
    """
    BucketAclHandler: Handler for BucketController
    """
    def DELETE(self, app):
        # Deleting a multiupload (segment) container needs no acl check:
        # cleanup is driven by the GET on the segment container instead.
        if not self.container.endswith(MULTIUPLOAD_SUFFIX):
            return self._handle_acl(app, 'DELETE')

    def HEAD(self, app):
        # A DELETE Bucket request checks the DELETE permission even for
        # the backend HEAD it sends first.
        sw_method = 'DELETE' if self.method == 'DELETE' else 'HEAD'
        return self._handle_acl(app, sw_method)

    def GET(self, app):
        if self.method == 'DELETE' and \
                self.container.endswith(MULTIUPLOAD_SUFFIX):
            # segment-container listing during bucket DELETE cleanup
            return
        return self._handle_acl(app, 'GET')

    def PUT(self, app):
        req_acl = ACL.from_headers(self.req.headers,
                                   Owner(self.user_id, self.user_id))

        # Send the PUT before setting the ACL so we can be sure the
        # target container does not already exist (this avoids
        # overwriting an existing bucket's ACL).
        self.req.get_acl_response(app, 'PUT')

        # update metadata
        self.req.bucket_acl = req_acl

        # FIXME: if this POST fails, a bucket with no ACL may be left
        # behind.
        return self.req.get_acl_response(app, 'POST')
|
||||
class ObjectAclHandler(BaseAclHandler):
    """
    ObjectAclHandler: Handler for ObjectController
    """
    def HEAD(self, app):
        # No object permission check is needed for DELETE Object.
        if self.method != 'DELETE':
            return self._handle_acl(app, 'HEAD')

    def PUT(self, app):
        # HEAD the container first (with its acl check) so the bucket
        # owner is available for building the new object ACL.
        bucket_resp = self._handle_acl(app, 'HEAD', obj='')
        self.req.object_acl = ACL.from_headers(
            self.req.headers, bucket_resp.bucket_acl.owner,
            Owner(self.user_id, self.user_id))
||||
|
||||
class S3AclHandler(BaseAclHandler):
    """
    S3AclHandler: Handler for S3AclController
    """
    def GET(self, app):
        """GET ?acl requires READ_ACP on the target resource."""
        self._handle_acl(app, 'HEAD', permission='READ_ACP')

    def PUT(self, app):
        """
        PUT ?acl: build the new ACL from headers or the xml body, check
        WRITE_ACP on the target, and stash the ACL on the request so it
        is persisted by the backend call that follows.
        """
        if self.req.is_object_request:
            # bucket owner is needed as a default for the new object ACL
            b_resp = self.req.get_acl_response(app, 'HEAD', obj='')
            o_resp = self._handle_acl(app, 'HEAD', permission='WRITE_ACP')
            req_acl = self.get_acl(self.req.headers,
                                   self.req.xml(ACL.max_xml_length),
                                   b_resp.bucket_acl.owner,
                                   o_resp.object_acl.owner)

            # Don't change the owner of the resource by PUT acl request.
            o_resp.object_acl.check_owner(req_acl.owner.id)

            for g in req_acl.grants:
                self.logger.debug(
                    'Grant %s %s permission on the object /%s/%s' %
                    (g.grantee, g.permission, self.req.container_name,
                     self.req.object_name))
            self.req.object_acl = req_acl
        else:
            self._handle_acl(app, self.method)

    def POST(self, app):
        """
        Bucket ?acl update (sent to Swift as POST): check WRITE_ACP,
        then stash the new bucket ACL on the request.
        """
        if self.req.is_bucket_request:
            resp = self._handle_acl(app, 'HEAD', permission='WRITE_ACP')

            req_acl = self.get_acl(self.req.headers,
                                   self.req.xml(ACL.max_xml_length),
                                   resp.bucket_acl.owner)

            # Don't change the owner of the resource by PUT acl request.
            resp.bucket_acl.check_owner(req_acl.owner.id)

            for g in req_acl.grants:
                self.logger.debug(
                    'Grant %s %s permission on the bucket /%s' %
                    (g.grantee, g.permission, self.req.container_name))
            self.req.bucket_acl = req_acl
        else:
            self._handle_acl(app, self.method)
||||
|
||||
class MultiObjectDeleteAclHandler(BaseAclHandler):
    """
    MultiObjectDeleteAclHandler: Handler for MultiObjectDeleteController
    """
    def HEAD(self, app):
        if self.obj:
            # per-object HEADs need no check; bucket write acl suffices
            return
        return self._handle_acl(app, 'HEAD')

    def DELETE(self, app):
        # Only bucket write acl is required
        pass
||||
|
||||
class MultiUploadAclHandler(BaseAclHandler):
    """
    MultiUpload stuff requires acl checking just once for the BASE
    container, so MultiUploadAclHandler extends BaseAclHandler to check
    acl only when a verb-specific handler is defined.  The verb should
    be defined for the first backend Swift request of an incoming
    request.

    Basic Rules:

    - BASE container name is always w/o 'MULTIUPLOAD_SUFFIX'
    - Any check timing is ok but we should check it as soon as possible.

    ========== ====== ============= ==========
    Controller Verb   CheckResource Permission
    ========== ====== ============= ==========
    Part       PUT    Container     WRITE
    Uploads    GET    Container     READ
    Uploads    POST   Container     WRITE
    Upload     GET    Container     READ
    Upload     DELETE Container     WRITE
    Upload     POST   Container     WRITE
    ========== ====== ============= ==========

    """
    def __init__(self, req, logger):
        super(MultiUploadAclHandler, self).__init__(req, logger)
        # set once the (single) acl check for this request has been done
        self.acl_checked = False

    def handle_acl(self, app, method, container=None, obj=None, headers=None):
        method = method or self.method
        with self.request_with(container, obj, headers):
            # MultiUpload stuff basically needs no acl check; only verbs
            # with an explicit handler defined here are checked.
            handler = getattr(self, method, None)
            if handler is not None:
                return handler(app)

    def HEAD(self, app):
        # For _check_upload_info
        self._handle_acl(app, 'HEAD', self.container, '')
||||
|
||||
class PartAclHandler(MultiUploadAclHandler):
    """
    PartAclHandler: Handler for PartController
    """
    def __init__(self, req, logger):
        # Deliberately skip MultiUploadAclHandler.__init__; Part requests
        # do not use the acl_checked flag.
        # pylint: disable-msg=E1003
        super(MultiUploadAclHandler, self).__init__(req, logger)

    def HEAD(self, app):
        if not self.container.endswith(MULTIUPLOAD_SUFFIX):
            # For check_copy_source
            return self._handle_acl(app, 'HEAD', self.container, self.obj)
        # For _check_upload_info: check against the BASE container
        base_container = self.container[:-len(MULTIUPLOAD_SUFFIX)]
        self._handle_acl(app, 'HEAD', base_container, '')
|
||||
|
||||
class UploadsAclHandler(MultiUploadAclHandler):
    """
    UploadsAclHandler: Handler for UploadsController
    """
    def handle_acl(self, app, method, *args, **kwargs):
        method = method or self.method
        handler = getattr(self, method, None)
        if handler is not None:
            return handler(app)

    def GET(self, app):
        # List Multipart Uploads: READ on the base container
        self._handle_acl(app, 'GET', self.container, '')

    def PUT(self, app):
        if self.acl_checked:
            return
        # Initiate Multipart Upload: check once, then stash the computed
        # object acl in a temporary sysmeta header so the completion step
        # can pick it up later.
        resp = self._handle_acl(app, 'HEAD', obj='')
        req_acl = ACL.from_headers(self.req.headers,
                                   resp.bucket_acl.owner,
                                   Owner(self.user_id, self.user_id))
        acl_headers = encode_acl('object', req_acl)
        self.req.headers[sysmeta_header('object', 'tmpacl')] = \
            acl_headers[sysmeta_header('object', 'acl')]
        self.acl_checked = True
||||
|
||||
class UploadAclHandler(MultiUploadAclHandler):
    """
    UploadAclHandler: Handler for UploadController
    """
    def handle_acl(self, app, method, *args, **kwargs):
        method = method or self.method
        handler = getattr(self, method, None)
        if handler is not None:
            return handler(app)

    def HEAD(self, app):
        # FIXME: GET HEAD case conflicts with GET service
        sw_method = 'GET' if self.method == 'GET' else 'HEAD'
        self._handle_acl(app, sw_method, self.container, '')

    def PUT(self, app):
        # Copy the temporary acl stashed on the upload marker object
        # (see UploadsAclHandler.PUT) onto the request so the completed
        # object gets it.
        container = self.req.container_name + MULTIUPLOAD_SUFFIX
        marker = '%s/%s' % (self.obj, self.req.params['uploadId'])
        resp = self.req._get_response(app, 'HEAD', container, marker)
        self.req.headers[sysmeta_header('object', 'acl')] = \
            resp.sysmeta_headers.get(sysmeta_header('object', 'tmpacl'))
|
||||
|
||||
"""
|
||||
ACL_MAP =
|
||||
{
|
||||
('<s3_method>', '<swift_method>', '<swift_resource>'):
|
||||
{'Resource': '<check_resource>',
|
||||
'Permission': '<check_permission>'},
|
||||
...
|
||||
}
|
||||
|
||||
s3_method: Method of S3 Request from user to s3api
|
||||
swift_method: Method of Swift Request from s3api to swift
|
||||
swift_resource: Resource of Swift Request from s3api to swift
|
||||
check_resource: <container/object>
|
||||
check_permission: <OWNER/READ/WRITE/READ_ACP/WRITE_ACP>
|
||||
"""
|
||||
ACL_MAP = {
|
||||
# HEAD Bucket
|
||||
('HEAD', 'HEAD', 'container'):
|
||||
{'Permission': 'READ'},
|
||||
# GET Service
|
||||
('GET', 'HEAD', 'container'):
|
||||
{'Permission': 'OWNER'},
|
||||
# GET Bucket, List Parts, List Multipart Upload
|
||||
('GET', 'GET', 'container'):
|
||||
{'Permission': 'READ'},
|
||||
# PUT Object, PUT Object Copy
|
||||
('PUT', 'HEAD', 'container'):
|
||||
{'Permission': 'WRITE'},
|
||||
# DELETE Bucket
|
||||
('DELETE', 'DELETE', 'container'):
|
||||
{'Permission': 'OWNER'},
|
||||
# HEAD Object
|
||||
('HEAD', 'HEAD', 'object'):
|
||||
{'Permission': 'READ'},
|
||||
# GET Object
|
||||
('GET', 'GET', 'object'):
|
||||
{'Permission': 'READ'},
|
||||
# PUT Object Copy, Upload Part Copy
|
||||
('PUT', 'HEAD', 'object'):
|
||||
{'Permission': 'READ'},
|
||||
# Abort Multipart Upload
|
||||
('DELETE', 'HEAD', 'container'):
|
||||
{'Permission': 'WRITE'},
|
||||
# Delete Object
|
||||
('DELETE', 'DELETE', 'object'):
|
||||
{'Resource': 'container',
|
||||
'Permission': 'WRITE'},
|
||||
# Complete Multipart Upload, DELETE Multiple Objects,
|
||||
# Initiate Multipart Upload
|
||||
('POST', 'HEAD', 'container'):
|
||||
{'Permission': 'WRITE'},
|
||||
}
|
95
swift/common/middleware/s3api/acl_utils.py
Normal file
95
swift/common/middleware/s3api/acl_utils.py
Normal file
@ -0,0 +1,95 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.middleware.s3api.exception import ACLError
|
||||
from swift.common.middleware.s3api.etree import fromstring, XMLSyntaxError, \
|
||||
DocumentInvalid, XMLNS_XSI
|
||||
from swift.common.middleware.s3api.s3response import S3NotImplemented, \
|
||||
MalformedACLError, InvalidArgument
|
||||
|
||||
|
||||
def swift_acl_translate(acl, group='', user='', xml=False):
    """
    Takes an S3 style ACL and returns a list of header/value pairs that
    implement that ACL in Swift, or "NotImplemented" if there isn't a way to do
    that yet.
    """
    # Swift does not support public write:
    # https://answers.launchpad.net/swift/+question/169541
    # TODO: if there's a way to get group and user, 'private' could map
    # to group + ':' + user read/write grants instead of '.'
    swift_acl = {
        'public-read': [['X-Container-Read', '.r:*,.rlistings']],
        'public-read-write': [['X-Container-Write', '.r:*'],
                              ['X-Container-Read',
                               '.r:*,.rlistings']],
        'private': [['X-Container-Write', '.'],
                    ['X-Container-Read', '.']],
    }

    if xml:
        # We are working with XML and need to parse it
        try:
            elem = fromstring(acl, 'AccessControlPolicy')
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedACLError()
        acl = 'unknown'
        for grant in elem.findall('./AccessControlList/Grant'):
            permission = grant.find('./Permission').text
            grantee = grant.find('./Grantee').get('{%s}type' % XMLNS_XSI)
            if permission == "FULL_CONTROL" and \
                    grantee == 'CanonicalUser' and \
                    acl not in ('public-read', 'public-read-write'):
                acl = 'private'
            elif permission == "READ" and grantee == 'Group' and \
                    acl != 'public-read-write':
                acl = 'public-read'
            elif permission == "WRITE" and grantee == 'Group':
                acl = 'public-read-write'
            else:
                acl = 'unsupported'

    if acl == 'authenticated-read':
        raise S3NotImplemented()
    elif acl not in swift_acl:
        raise ACLError()

    return swift_acl[acl]
||||
|
||||
|
||||
def handle_acl_header(req):
    """
    Handle the x-amz-acl header.
    Note that this header currently used for only normal-acl
    (not implemented) on s3acl.
    TODO: add translation to swift acl like as x-container-read to s3acl
    """
    amz_acl = req.environ['HTTP_X_AMZ_ACL']
    # Translate the Amazon ACL to something that can be implemented in
    # Swift, 501 otherwise.  Swift uses POST for ACLs, whereas S3 uses
    # PUT, so strip the header and query string from the request.
    del req.environ['HTTP_X_AMZ_ACL']
    if req.query_string:
        req.query_string = ''

    try:
        headers_to_set = swift_acl_translate(amz_acl)
    except ACLError:
        raise InvalidArgument('x-amz-acl', amz_acl)

    for header, value in headers_to_set:
        req.headers[header] = value
|
52
swift/common/middleware/s3api/controllers/__init__.py
Normal file
52
swift/common/middleware/s3api/controllers/__init__.py
Normal file
@ -0,0 +1,52 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller, \
|
||||
UnsupportedController
|
||||
from swift.common.middleware.s3api.controllers.service import ServiceController
|
||||
from swift.common.middleware.s3api.controllers.bucket import BucketController
|
||||
from swift.common.middleware.s3api.controllers.obj import ObjectController
|
||||
|
||||
from swift.common.middleware.s3api.controllers.acl import AclController
|
||||
from swift.common.middleware.s3api.controllers.s3_acl import S3AclController
|
||||
from swift.common.middleware.s3api.controllers.multi_delete import \
|
||||
MultiObjectDeleteController
|
||||
from swift.common.middleware.s3api.controllers.multi_upload import \
|
||||
UploadController, PartController, UploadsController
|
||||
from swift.common.middleware.s3api.controllers.location import \
|
||||
LocationController
|
||||
from swift.common.middleware.s3api.controllers.logging import \
|
||||
LoggingStatusController
|
||||
from swift.common.middleware.s3api.controllers.versioning import \
|
||||
VersioningController
|
||||
|
||||
__all__ = [
|
||||
'Controller',
|
||||
'ServiceController',
|
||||
'BucketController',
|
||||
'ObjectController',
|
||||
|
||||
'AclController',
|
||||
'S3AclController',
|
||||
'MultiObjectDeleteController',
|
||||
'PartController',
|
||||
'UploadsController',
|
||||
'UploadController',
|
||||
'LocationController',
|
||||
'LoggingStatusController',
|
||||
'VersioningController',
|
||||
|
||||
'UnsupportedController',
|
||||
]
|
130
swift/common/middleware/s3api/controllers/acl.py
Normal file
130
swift/common/middleware/s3api/controllers/acl.py
Normal file
@ -0,0 +1,130 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.http import HTTP_OK
|
||||
from swift.common.middleware.acl import parse_acl, referrer_allowed
|
||||
from swift.common.utils import public
|
||||
|
||||
from swift.common.middleware.s3api.exception import ACLError
|
||||
from swift.common.middleware.s3api.controllers.base import Controller
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk, S3NotImplemented, \
|
||||
MalformedACLError, UnexpectedContent, MissingSecurityHeader
|
||||
from swift.common.middleware.s3api.etree import Element, SubElement, tostring
|
||||
from swift.common.middleware.s3api.acl_utils import swift_acl_translate, \
|
||||
XMLNS_XSI
|
||||
|
||||
|
||||
MAX_ACL_BODY_SIZE = 200 * 1024
|
||||
|
||||
|
||||
def get_acl(account_name, headers):
    """
    Attempts to construct an S3 ACL based on what is found in the swift headers
    """

    elem = Element('AccessControlPolicy')
    owner = SubElement(elem, 'Owner')
    SubElement(owner, 'ID').text = account_name
    SubElement(owner, 'DisplayName').text = account_name
    access_control_list = SubElement(elem, 'AccessControlList')

    # grant FULL_CONTROL to myself by default
    grant = SubElement(access_control_list, 'Grant')
    grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
    grantee.set('{%s}type' % XMLNS_XSI, 'CanonicalUser')
    SubElement(grantee, 'ID').text = account_name
    SubElement(grantee, 'DisplayName').text = account_name
    SubElement(grant, 'Permission').text = 'FULL_CONTROL'

    # Mirror Swift referrer acls: a container readable/writable by any
    # referrer shows up as a public READ/WRITE grant to the AllUsers group.
    for permission, header_name in (('READ', 'x-container-read'),
                                    ('WRITE', 'x-container-write')):
        referrers, _ = parse_acl(headers.get(header_name))
        if referrer_allowed('unknown', referrers):
            grant = SubElement(access_control_list, 'Grant')
            grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
            grantee.set('{%s}type' % XMLNS_XSI, 'Group')
            SubElement(grantee, 'URI').text = \
                'http://acs.amazonaws.com/groups/global/AllUsers'
            SubElement(grant, 'Permission').text = permission

    return HTTPOk(body=tostring(elem), content_type="text/plain")
||||
|
||||
|
||||
class AclController(Controller):
    """
    Handles the following APIs:

    * GET Bucket acl
    * PUT Bucket acl
    * GET Object acl
    * PUT Object acl

    Those APIs are logged as ACL operations in the S3 server log.
    """
    @public
    def GET(self, req):
        """
        Handles GET Bucket acl and GET Object acl.
        """
        resp = req.get_response(self.app, method='HEAD')

        return get_acl(req.user_id, resp.headers)

    @public
    def PUT(self, req):
        """
        Handles PUT Bucket acl and PUT Object acl.
        """
        if req.is_object_request:
            # Object ACLs are not supported yet
            raise S3NotImplemented()

        # Handle Bucket ACL
        xml = req.xml(MAX_ACL_BODY_SIZE)
        has_canned_header = 'HTTP_X_AMZ_ACL' in req.environ
        if has_canned_header and xml:
            # S3 doesn't allow to give ACL with both ACL header and body.
            raise UnexpectedContent()
        if not has_canned_header and not xml:
            # Both canned ACL header and xml body are missing
            raise MissingSecurityHeader(missing_header_name='x-amz-acl')

        if xml:
            # We very likely have an XML-based ACL request; translate it
            # into the equivalent Swift container acl headers.
            try:
                translated_acl = swift_acl_translate(xml, xml=True)
            except ACLError:
                raise MalformedACLError()

            for header, acl in translated_acl:
                req.headers[header] = acl

        resp = req.get_response(self.app, 'POST')
        resp.status = HTTP_OK
        resp.headers.update({'Location': req.container_name})

        return resp
|
100
swift/common/middleware/s3api/controllers/base.py
Normal file
100
swift/common/middleware/s3api/controllers/base.py
Normal file
@ -0,0 +1,100 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
|
||||
from swift.common.middleware.s3api.s3response import S3NotImplemented, \
|
||||
InvalidRequest
|
||||
from swift.common.middleware.s3api.utils import camel_to_snake
|
||||
|
||||
|
||||
def bucket_operation(func=None, err_resp=None, err_msg=None):
    """
    A decorator to ensure that the request is a bucket operation.  If the
    target resource is an object, this decorator updates the request by default
    so that the controller handles it as a bucket operation.  If 'err_resp' is
    specified, this raises it on error instead.
    """
    def _bucket_operation(func):
        @functools.wraps(func)
        def wrapped(self, req):
            if not req.is_bucket_request:
                if err_resp:
                    raise err_resp(msg=err_msg)

                # fall back: treat the object request as a bucket one
                self.logger.debug('A key is specified for bucket API.')
                req.object_name = None

            return func(self, req)

        return wrapped

    # usable both bare (@bucket_operation) and with arguments
    # (@bucket_operation(err_resp=...))
    return _bucket_operation(func) if func else _bucket_operation
||||
|
||||
|
||||
def object_operation(func):
    """
    A decorator to ensure that the request is an object operation.  If the
    target resource is not an object, this raises an error response.
    """
    @functools.wraps(func)
    def _check_object(self, req):
        if req.is_object_request:
            return func(self, req)
        raise InvalidRequest('A key must be specified')

    return _check_object
||||
|
||||
|
||||
def check_container_existence(func):
    """
    A decorator to ensure the container existence.
    """
    @functools.wraps(func)
    def _check_container(self, req):
        # Resolve the container info first; presumably a missing
        # container errors out here before func runs — confirm against
        # get_container_info's error behavior.
        req.get_container_info(self.app)
        return func(self, req)

    return _check_container
||||
|
||||
|
||||
class Controller(object):
    """
    Base WSGI controller class for the middleware
    """
    def __init__(self, app, conf, logger, **kwargs):
        self.app = app
        self.conf = conf
        self.logger = logger

    @classmethod
    def resource_type(cls):
        """
        Returns the target resource type of this controller.
        """
        # 'FooBarController' -> 'FOO_BAR'
        base_name = cls.__name__[:-len('Controller')]
        return camel_to_snake(base_name).upper()
|
||||
|
||||
class UnsupportedController(Controller):
    """
    Handles unsupported requests.
    """
    def __init__(self, app, conf, logger, **kwargs):
        # Instantiation always fails: any API routed to this controller
        # is answered with a NotImplemented error response.
        raise S3NotImplemented('The requested resource is not implemented')
|
251
swift/common/middleware/s3api/controllers/bucket.py
Normal file
251
swift/common/middleware/s3api/controllers/bucket.py
Normal file
@ -0,0 +1,251 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
from base64 import standard_b64encode as b64encode
|
||||
from base64 import standard_b64decode as b64decode
|
||||
|
||||
from swift.common.http import HTTP_OK
|
||||
from swift.common.utils import json, public, config_true_value
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller
|
||||
from swift.common.middleware.s3api.etree import Element, SubElement, tostring, \
|
||||
fromstring, XMLSyntaxError, DocumentInvalid
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk, S3NotImplemented, \
|
||||
InvalidArgument, \
|
||||
MalformedXML, InvalidLocationConstraint, NoSuchBucket, \
|
||||
BucketNotEmpty, InternalError, ServiceUnavailable, NoSuchKey
|
||||
from swift.common.middleware.s3api.utils import MULTIUPLOAD_SUFFIX
|
||||
|
||||
MAX_PUT_BUCKET_BODY_SIZE = 10240
|
||||
|
||||
|
||||
class BucketController(Controller):
    """
    Handles bucket request.
    """
    def _delete_segments_bucket(self, req):
        """
        Before delete bucket, delete segments bucket if existing.
        """
        container = req.container_name + MULTIUPLOAD_SUFFIX
        marker = ''
        seg = ''

        try:
            resp = req.get_response(self.app, 'HEAD')
            if int(resp.sw_headers['X-Container-Object-Count']) > 0:
                raise BucketNotEmpty()
            # FIXME: This extra HEAD saves unexpected segment deletion
            # but if a complete multipart upload happen while cleanup
            # segment container below, completed object may be missing its
            # segments unfortunately. To be safer, it might be good
            # to handle if the segments can be deleted for each object.
        except NoSuchBucket:
            pass

        try:
            while True:
                # delete all segments, page by page, using the name of the
                # last deleted segment as the next listing marker
                resp = req.get_response(self.app, 'GET', container,
                                        query={'format': 'json',
                                               'marker': marker})
                segments = json.loads(resp.body)
                for seg in segments:
                    try:
                        req.get_response(self.app, 'DELETE', container,
                                         seg['name'])
                    except NoSuchKey:
                        # already gone; nothing to do
                        pass
                    except InternalError:
                        raise ServiceUnavailable()
                if segments:
                    marker = seg['name']
                else:
                    break
            req.get_response(self.app, 'DELETE', container)
        except NoSuchBucket:
            # no segments container was ever created for this bucket
            return
        except (BucketNotEmpty, InternalError):
            raise ServiceUnavailable()

    @public
    def HEAD(self, req):
        """
        Handle HEAD Bucket (Get Metadata) request
        """
        resp = req.get_response(self.app)

        return HTTPOk(headers=resp.headers)

    @public
    def GET(self, req):
        """
        Handle GET Bucket (List Objects) request
        """

        max_keys = req.get_validated_param(
            'max-keys', self.conf.max_bucket_listing)
        # TODO: Separate max_bucket_listing and default_bucket_listing
        tag_max_keys = max_keys
        max_keys = min(max_keys, self.conf.max_bucket_listing)

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        # ask Swift for one extra entry so truncation can be detected
        query = {
            'format': 'json',
            'limit': max_keys + 1,
        }
        if 'marker' in req.params:
            query.update({'marker': req.params['marker']})
        if 'prefix' in req.params:
            query.update({'prefix': req.params['prefix']})
        if 'delimiter' in req.params:
            query.update({'delimiter': req.params['delimiter']})

        # GET Bucket (List Objects) Version 2 parameters
        is_v2 = int(req.params.get('list-type', '1')) == 2
        fetch_owner = False
        if is_v2:
            if 'start-after' in req.params:
                query.update({'marker': req.params['start-after']})
            # continuation-token overrides start-after
            if 'continuation-token' in req.params:
                decoded = b64decode(req.params['continuation-token'])
                query.update({'marker': decoded})
            if 'fetch-owner' in req.params:
                fetch_owner = config_true_value(req.params['fetch-owner'])

        resp = req.get_response(self.app, query=query)

        objects = json.loads(resp.body)

        elem = Element('ListBucketResult')
        SubElement(elem, 'Name').text = req.container_name
        SubElement(elem, 'Prefix').text = req.params.get('prefix')

        # in order to judge that truncated is valid, check whether
        # max_keys + 1 th element exists in swift.
        is_truncated = max_keys > 0 and len(objects) > max_keys
        objects = objects[:max_keys]

        if not is_v2:
            SubElement(elem, 'Marker').text = req.params.get('marker')
            if is_truncated and 'delimiter' in req.params:
                # listing entries carry either 'name' (object) or
                # 'subdir' (common prefix), never both
                if 'name' in objects[-1]:
                    SubElement(elem, 'NextMarker').text = \
                        objects[-1]['name']
                if 'subdir' in objects[-1]:
                    SubElement(elem, 'NextMarker').text = \
                        objects[-1]['subdir']
        else:
            if is_truncated:
                if 'name' in objects[-1]:
                    SubElement(elem, 'NextContinuationToken').text = \
                        b64encode(objects[-1]['name'])
                if 'subdir' in objects[-1]:
                    SubElement(elem, 'NextContinuationToken').text = \
                        b64encode(objects[-1]['subdir'])
            if 'continuation-token' in req.params:
                SubElement(elem, 'ContinuationToken').text = \
                    req.params['continuation-token']
            if 'start-after' in req.params:
                SubElement(elem, 'StartAfter').text = \
                    req.params['start-after']
            SubElement(elem, 'KeyCount').text = str(len(objects))

        SubElement(elem, 'MaxKeys').text = str(tag_max_keys)

        if 'delimiter' in req.params:
            SubElement(elem, 'Delimiter').text = req.params['delimiter']

        if encoding_type is not None:
            SubElement(elem, 'EncodingType').text = encoding_type

        SubElement(elem, 'IsTruncated').text = \
            'true' if is_truncated else 'false'

        for o in objects:
            if 'subdir' not in o:
                contents = SubElement(elem, 'Contents')
                SubElement(contents, 'Key').text = o['name']
                SubElement(contents, 'LastModified').text = \
                    o['last_modified'][:-3] + 'Z'
                SubElement(contents, 'ETag').text = '"%s"' % o['hash']
                SubElement(contents, 'Size').text = str(o['bytes'])
                # v1 listings always include Owner; v2 only on request
                if fetch_owner or not is_v2:
                    owner = SubElement(contents, 'Owner')
                    SubElement(owner, 'ID').text = req.user_id
                    SubElement(owner, 'DisplayName').text = req.user_id
                SubElement(contents, 'StorageClass').text = 'STANDARD'

        for o in objects:
            if 'subdir' in o:
                common_prefixes = SubElement(elem, 'CommonPrefixes')
                SubElement(common_prefixes, 'Prefix').text = o['subdir']

        body = tostring(elem, encoding_type=encoding_type)

        return HTTPOk(body=body, content_type='application/xml')

    @public
    def PUT(self, req):
        """
        Handle PUT Bucket request
        """
        xml = req.xml(MAX_PUT_BUCKET_BODY_SIZE)
        if xml:
            # check location
            try:
                elem = fromstring(
                    xml, 'CreateBucketConfiguration', self.logger)
                location = elem.find('./LocationConstraint').text
            except (XMLSyntaxError, DocumentInvalid):
                raise MalformedXML()
            except Exception as e:
                self.logger.error(e)
                # A bare ``raise`` re-raises the active exception with its
                # original traceback and is valid on both py2 and py3,
                # unlike the former py2-only
                # ``raise exc_type, exc_value, exc_traceback`` form.
                raise

            if location != self.conf.location:
                # s3api cannot support multiple regions currently.
                raise InvalidLocationConstraint()

        resp = req.get_response(self.app)

        resp.status = HTTP_OK
        resp.location = '/' + req.container_name

        return resp

    @public
    def DELETE(self, req):
        """
        Handle DELETE Bucket request
        """
        if self.conf.allow_multipart_uploads:
            self._delete_segments_bucket(req)
        resp = req.get_response(self.app)
        return resp

    @public
    def POST(self, req):
        """
        Handle POST Bucket request
        """
        raise S3NotImplemented()
42
swift/common/middleware/s3api/controllers/location.py
Normal file
42
swift/common/middleware/s3api/controllers/location.py
Normal file
@ -0,0 +1,42 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.utils import public
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller, \
|
||||
bucket_operation
|
||||
from swift.common.middleware.s3api.etree import Element, tostring
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk
|
||||
|
||||
|
||||
class LocationController(Controller):
    """
    Handles the GET Bucket location API, which is recorded as a LOCATION
    operation in the S3 server log.
    """
    @public
    @bucket_operation
    def GET(self, req):
        """
        Handles GET Bucket location.
        """
        # Confirm the bucket exists before answering.
        req.get_response(self.app, method='HEAD')

        location_elem = Element('LocationConstraint')
        # The classic 'US' region is represented by an empty element.
        configured = self.conf.location
        if configured != 'US':
            location_elem.text = configured

        return HTTPOk(body=tostring(location_elem),
                      content_type='application/xml')
54
swift/common/middleware/s3api/controllers/logging.py
Normal file
54
swift/common/middleware/s3api/controllers/logging.py
Normal file
@ -0,0 +1,54 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.utils import public
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller, \
|
||||
bucket_operation
|
||||
from swift.common.middleware.s3api.etree import Element, tostring
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk, S3NotImplemented, \
|
||||
NoLoggingStatusForKey
|
||||
|
||||
|
||||
class LoggingStatusController(Controller):
    """
    Handles the following APIs:

    * GET Bucket logging
    * PUT Bucket logging

    Those APIs are logged as LOGGING_STATUS operations in the S3 server log.
    """
    @public
    @bucket_operation(err_resp=NoLoggingStatusForKey)
    def GET(self, req):
        """
        Handles GET Bucket logging.
        """
        # Ensure the bucket exists before reporting a logging status.
        req.get_response(self.app, method='HEAD')

        # Access logging is not supported, so always answer with an
        # empty status document, i.e. logging disabled.
        status_elem = Element('BucketLoggingStatus')
        return HTTPOk(body=tostring(status_elem),
                      content_type='application/xml')

    @public
    @bucket_operation(err_resp=NoLoggingStatusForKey)
    def PUT(self, req):
        """
        Handles PUT Bucket logging.
        """
        raise S3NotImplemented()
126
swift/common/middleware/s3api/controllers/multi_delete.py
Normal file
126
swift/common/middleware/s3api/controllers/multi_delete.py
Normal file
@ -0,0 +1,126 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from swift.common.utils import public
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller, \
|
||||
bucket_operation
|
||||
from swift.common.middleware.s3api.etree import Element, SubElement, \
|
||||
fromstring, tostring, XMLSyntaxError, DocumentInvalid
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk, S3NotImplemented, \
|
||||
NoSuchKey, ErrorResponse, MalformedXML, UserKeyMustBeSpecified, \
|
||||
AccessDenied, MissingRequestBodyError
|
||||
|
||||
MAX_MULTI_DELETE_BODY_SIZE = 61365
|
||||
|
||||
|
||||
class MultiObjectDeleteController(Controller):
    """
    Handles Delete Multiple Objects, which is logged as a MULTI_OBJECT_DELETE
    operation in the S3 server log.
    """
    def _gen_error_body(self, error, elem, delete_list):
        """
        Report the same *error* for every requested key and return the
        serialized DeleteResult body.
        """
        for key, version in delete_list:
            if version is not None:
                # TODO: delete the specific version of the object
                raise S3NotImplemented()

            error_elem = SubElement(elem, 'Error')
            SubElement(error_elem, 'Key').text = key
            SubElement(error_elem, 'Code').text = error.__class__.__name__
            SubElement(error_elem, 'Message').text = error._msg

        return tostring(elem)

    @public
    @bucket_operation
    def POST(self, req):
        """
        Handles Delete Multiple Objects.
        """
        def object_key_iter(elem):
            # Yield (key, version) pairs from the request's Object elements.
            for obj in elem.iterchildren('Object'):
                key = obj.find('./Key').text
                if not key:
                    raise UserKeyMustBeSpecified()
                version = obj.find('./VersionId')
                if version is not None:
                    version = version.text

                yield key, version

        try:
            xml = req.xml(MAX_MULTI_DELETE_BODY_SIZE)
            if not xml:
                raise MissingRequestBodyError()

            req.check_md5(xml)
            elem = fromstring(xml, 'Delete', self.logger)

            # In quiet mode only errors are reported, not successes.
            quiet = elem.find('./Quiet')
            if quiet is not None and quiet.text.lower() == 'true':
                self.quiet = True
            else:
                self.quiet = False

            delete_list = list(object_key_iter(elem))
            if len(delete_list) > self.conf.max_multi_delete_objects:
                raise MalformedXML()
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedXML()
        except ErrorResponse:
            raise
        except Exception as e:
            self.logger.error(e)
            # A bare ``raise`` re-raises the active exception with its
            # original traceback and is valid on both py2 and py3, unlike
            # the former py2-only
            # ``raise exc_type, exc_value, exc_traceback`` form.
            raise

        elem = Element('DeleteResult')

        # check bucket existence
        try:
            req.get_response(self.app, 'HEAD')
        except AccessDenied as error:
            # Access was denied to the whole bucket: report the denial
            # per-key rather than failing the request outright.
            body = self._gen_error_body(error, elem, delete_list)
            return HTTPOk(body=body)

        for key, version in delete_list:
            if version is not None:
                # TODO: delete the specific version of the object
                raise S3NotImplemented()

            req.object_name = key

            try:
                query = req.gen_multipart_manifest_delete_query(self.app)
                req.get_response(self.app, method='DELETE', query=query)
            except NoSuchKey:
                # Deleting a missing key is a success in S3's contract.
                pass
            except ErrorResponse as e:
                error = SubElement(elem, 'Error')
                SubElement(error, 'Key').text = key
                SubElement(error, 'Code').text = e.__class__.__name__
                SubElement(error, 'Message').text = e._msg
                continue

            if not self.quiet:
                deleted = SubElement(elem, 'Deleted')
                SubElement(deleted, 'Key').text = key

        body = tostring(elem)

        return HTTPOk(body=body)
671
swift/common/middleware/s3api/controllers/multi_upload.py
Normal file
671
swift/common/middleware/s3api/controllers/multi_upload.py
Normal file
@ -0,0 +1,671 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Implementation of S3 Multipart Upload.
|
||||
|
||||
This module implements S3 Multipart Upload APIs with the Swift SLO feature.
|
||||
The following explains how S3api uses swift container and objects to store S3
|
||||
upload information:
|
||||
|
||||
-----------------
|
||||
[bucket]+segments
|
||||
-----------------
|
||||
|
||||
A container to store upload information. [bucket] is the original bucket
|
||||
where multipart upload is initiated.
|
||||
|
||||
-----------------------------
|
||||
[bucket]+segments/[upload_id]
|
||||
-----------------------------
|
||||
|
||||
  An object for the ongoing upload id. The object is empty and used for
|
||||
checking the target upload status. If the object exists, it means that the
|
||||
  upload has been initiated but neither completed nor aborted.
|
||||
|
||||
-------------------------------------------
|
||||
[bucket]+segments/[upload_id]/[part_number]
|
||||
-------------------------------------------
|
||||
|
||||
The last suffix is the part number under the upload id. When the client uploads
|
||||
the parts, they will be stored in the namespace with
|
||||
[bucket]+segments/[upload_id]/[part_number].
|
||||
|
||||
Example listing result in the [bucket]+segments container::
|
||||
|
||||
[bucket]+segments/[upload_id1] # upload id object for upload_id1
|
||||
[bucket]+segments/[upload_id1]/1 # part object for upload_id1
|
||||
[bucket]+segments/[upload_id1]/2 # part object for upload_id1
|
||||
[bucket]+segments/[upload_id1]/3 # part object for upload_id1
|
||||
[bucket]+segments/[upload_id2] # upload id object for upload_id2
|
||||
[bucket]+segments/[upload_id2]/1 # part object for upload_id2
|
||||
[bucket]+segments/[upload_id2]/2 # part object for upload_id2
|
||||
.
|
||||
.
|
||||
|
||||
Those part objects are directly used as segments of a Swift
|
||||
Static Large Object when the multipart upload is completed.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
from swift.common.swob import Range
|
||||
from swift.common.utils import json, public
|
||||
from swift.common.db import utf8encode
|
||||
|
||||
from six.moves.urllib.parse import urlparse # pylint: disable=F0401
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller, \
|
||||
bucket_operation, object_operation, check_container_existence
|
||||
from swift.common.middleware.s3api.s3response import InvalidArgument, \
|
||||
ErrorResponse, MalformedXML, \
|
||||
InvalidPart, BucketAlreadyExists, EntityTooSmall, InvalidPartOrder, \
|
||||
InvalidRequest, HTTPOk, HTTPNoContent, NoSuchKey, NoSuchUpload, \
|
||||
NoSuchBucket
|
||||
from swift.common.middleware.s3api.exception import BadSwiftRequest
|
||||
from swift.common.middleware.s3api.utils import unique_id, \
|
||||
MULTIUPLOAD_SUFFIX, S3Timestamp, sysmeta_header
|
||||
from swift.common.middleware.s3api.etree import Element, SubElement, \
|
||||
fromstring, tostring, XMLSyntaxError, DocumentInvalid
|
||||
|
||||
DEFAULT_MAX_PARTS_LISTING = 1000
|
||||
DEFAULT_MAX_UPLOADS = 1000
|
||||
|
||||
MAX_COMPLETE_UPLOAD_BODY_SIZE = 2048 * 1024
|
||||
|
||||
|
||||
def _get_upload_info(req, app, upload_id):
    """
    HEAD the marker object for *upload_id* and return the response.

    :raises NoSuchUpload: when the marker object does not exist, i.e. the
                          upload was never initiated or already finished.
    """
    segments_container = req.container_name + MULTIUPLOAD_SUFFIX
    marker_obj = '%s/%s' % (req.object_name, upload_id)

    try:
        return req.get_response(app, 'HEAD', container=segments_container,
                                obj=marker_obj)
    except NoSuchKey:
        raise NoSuchUpload(upload_id=upload_id)
|
||||
|
||||
def _check_upload_info(req, app, upload_id):
    """
    Ensure the given multipart upload is still in progress.

    Delegates to _get_upload_info, which raises NoSuchUpload when the
    upload id marker object does not exist; the response is discarded.
    """
    _get_upload_info(req, app, upload_id)
|
||||
|
||||
class PartController(Controller):
    """
    Handles the following APIs:

    * Upload Part
    * Upload Part - Copy

    Those APIs are logged as PART operations in the S3 server log.
    """
    @public
    @object_operation
    @check_container_existence
    def PUT(self, req):
        """
        Handles Upload Part and Upload Part Copy.
        """

        # uploadId is mandatory; without it this would be a plain object
        # PUT with an unexpected partNumber parameter.
        if 'uploadId' not in req.params:
            raise InvalidArgument('ResourceType', 'partNumber',
                                  'Unexpected query string parameter')

        # partNumber must be an integer in [1, max_upload_part_num];
        # the inner raise funnels every failure into one error message.
        try:
            part_number = int(req.params['partNumber'])
            if part_number < 1 or self.conf.max_upload_part_num < part_number:
                raise Exception()
        except Exception:
            err_msg = 'Part number must be an integer between 1 and %d,' \
                      ' inclusive' % self.conf.max_upload_part_num
            raise InvalidArgument('partNumber', req.params['partNumber'],
                                  err_msg)

        upload_id = req.params['uploadId']
        # Raises NoSuchUpload when the upload was never initiated or is
        # already completed/aborted.
        _check_upload_info(req, self.app, upload_id)

        # Redirect the PUT into the segments container under the
        # [object]/[upload_id]/[part_number] namespace.
        req.container_name += MULTIUPLOAD_SUFFIX
        req.object_name = '%s/%s/%d' % (req.object_name, upload_id,
                                        part_number)

        req_timestamp = S3Timestamp.now()
        req.headers['X-Timestamp'] = req_timestamp.internal
        # check_copy_source returns the source object's response when this
        # is an Upload Part Copy; presumably None otherwise — TODO confirm.
        source_resp = req.check_copy_source(self.app)
        if 'X-Amz-Copy-Source' in req.headers and \
                'X-Amz-Copy-Source-Range' in req.headers:
            rng = req.headers['X-Amz-Copy-Source-Range']

            # The copy range must parse as exactly one byte range.
            header_valid = True
            try:
                rng_obj = Range(rng)
                if len(rng_obj.ranges) != 1:
                    header_valid = False
            except ValueError:
                header_valid = False
            if not header_valid:
                err_msg = ('The x-amz-copy-source-range value must be of the '
                           'form bytes=first-last where first and last are '
                           'the zero-based offsets of the first and last '
                           'bytes to copy')
                raise InvalidArgument('x-amz-source-range', rng, err_msg)

            # The range must also be satisfiable against the source size.
            source_size = int(source_resp.headers['Content-Length'])
            if not rng_obj.ranges_for_length(source_size):
                err_msg = ('Range specified is not valid for source object '
                           'of size: %s' % source_size)
                raise InvalidArgument('x-amz-source-range', rng, err_msg)

            # Hand the validated range to Swift as a normal Range header.
            req.headers['Range'] = rng
            del req.headers['X-Amz-Copy-Source-Range']
        resp = req.get_response(self.app)

        # Upload Part Copy responds with an XML body instead of bare 200.
        if 'X-Amz-Copy-Source' in req.headers:
            resp.append_copy_resp_body(req.controller_name,
                                       req_timestamp.s3xmlformat)

        resp.status = 200
        return resp
||||
|
||||
|
||||
class UploadsController(Controller):
    """
    Handles the following APIs:

    * List Multipart Uploads
    * Initiate Multipart Upload

    Those APIs are logged as UPLOADS operations in the S3 server log.
    """
    @public
    @bucket_operation(err_resp=InvalidRequest,
                      err_msg="Key is not expected for the GET method "
                              "?uploads subresource")
    @check_container_existence
    def GET(self, req):
        """
        Handles List Multipart Uploads
        """

        def separate_uploads(uploads, prefix, delimiter):
            """
            separate_uploads will separate uploads into non_delimited_uploads
            (a subset of uploads) and common_prefixes according to the
            specified delimiter. non_delimited_uploads is a list of uploads
            which exclude the delimiter. common_prefixes is a set of prefixes
            prior to the specified delimiter. Note that the prefix in the
            common_prefixes includes the delimiter itself.

            e.g. if the delimiter is '/' and uploads consists of
            ['foo', 'foo/bar'], this function will return (['foo'], ['foo/']).

            :param uploads: A list of upload dictionaries
            :param prefix: A string of prefix reserved on the upload path.
                           (i.e. the delimiter must be searched behind the
                           prefix)
            :param delimiter: A string of delimiter to split the path in each
                              upload

            :return (non_delimited_uploads, common_prefixes)
            """
            (prefix, delimiter) = \
                utf8encode(prefix, delimiter)
            non_delimited_uploads = []
            common_prefixes = set()
            for upload in uploads:
                key = upload['key']
                # only look for the delimiter after the reserved prefix
                end = key.find(delimiter, len(prefix))
                if end >= 0:
                    common_prefix = key[:end + len(delimiter)]
                    common_prefixes.add(common_prefix)
                else:
                    non_delimited_uploads.append(upload)
            return non_delimited_uploads, sorted(common_prefixes)

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        keymarker = req.params.get('key-marker', '')
        uploadid = req.params.get('upload-id-marker', '')
        maxuploads = req.get_validated_param(
            'max-uploads', DEFAULT_MAX_UPLOADS, DEFAULT_MAX_UPLOADS)

        # request one extra entry so truncation can be detected
        query = {
            'format': 'json',
            'limit': maxuploads + 1,
        }

        # Upload id objects are named [key]/[upload_id]; '~' sorts after
        # the digits/letters used in upload ids, so '[key]/~' skips all
        # uploads of the marker key.
        if uploadid and keymarker:
            query.update({'marker': '%s/%s' % (keymarker, uploadid)})
        elif keymarker:
            query.update({'marker': '%s/~' % (keymarker)})
        if 'prefix' in req.params:
            query.update({'prefix': req.params['prefix']})

        container = req.container_name + MULTIUPLOAD_SUFFIX
        try:
            resp = req.get_response(self.app, container=container, query=query)
            objects = json.loads(resp.body)
        except NoSuchBucket:
            # Assume NoSuchBucket as no uploads
            objects = []

        def object_to_upload(object_info):
            # map a segments-container listing entry to an upload dict
            obj, upid = object_info['name'].rsplit('/', 1)
            obj_dict = {'key': obj,
                        'upload_id': upid,
                        'last_modified': object_info['last_modified']}
            return obj_dict

        # uploads is a list consists of dict, {key, upload_id, last_modified}
        # Note that pattern matcher will drop whole segments objects like as
        # object_name/upload_id/1.
        pattern = re.compile('/[0-9]+$')
        uploads = [object_to_upload(obj) for obj in objects if
                   pattern.search(obj.get('name', '')) is None]

        prefixes = []
        if 'delimiter' in req.params:
            prefix = req.params.get('prefix', '')
            delimiter = req.params['delimiter']
            uploads, prefixes = \
                separate_uploads(uploads, prefix, delimiter)

        # the extra entry requested above signals a truncated listing
        if len(uploads) > maxuploads:
            uploads = uploads[:maxuploads]
            truncated = True
        else:
            truncated = False

        nextkeymarker = ''
        nextuploadmarker = ''
        if len(uploads) > 1:
            nextuploadmarker = uploads[-1]['upload_id']
            nextkeymarker = uploads[-1]['key']

        result_elem = Element('ListMultipartUploadsResult')
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'KeyMarker').text = keymarker
        SubElement(result_elem, 'UploadIdMarker').text = uploadid
        SubElement(result_elem, 'NextKeyMarker').text = nextkeymarker
        SubElement(result_elem, 'NextUploadIdMarker').text = nextuploadmarker
        if 'delimiter' in req.params:
            SubElement(result_elem, 'Delimiter').text = \
                req.params['delimiter']
        if 'prefix' in req.params:
            SubElement(result_elem, 'Prefix').text = req.params['prefix']
        SubElement(result_elem, 'MaxUploads').text = str(maxuploads)
        if encoding_type is not None:
            SubElement(result_elem, 'EncodingType').text = encoding_type
        SubElement(result_elem, 'IsTruncated').text = \
            'true' if truncated else 'false'

        # TODO: don't show uploads which are initiated before this bucket is
        # created.
        for u in uploads:
            upload_elem = SubElement(result_elem, 'Upload')
            SubElement(upload_elem, 'Key').text = u['key']
            SubElement(upload_elem, 'UploadId').text = u['upload_id']
            initiator_elem = SubElement(upload_elem, 'Initiator')
            SubElement(initiator_elem, 'ID').text = req.user_id
            SubElement(initiator_elem, 'DisplayName').text = req.user_id
            owner_elem = SubElement(upload_elem, 'Owner')
            SubElement(owner_elem, 'ID').text = req.user_id
            SubElement(owner_elem, 'DisplayName').text = req.user_id
            SubElement(upload_elem, 'StorageClass').text = 'STANDARD'
            SubElement(upload_elem, 'Initiated').text = \
                u['last_modified'][:-3] + 'Z'

        for p in prefixes:
            elem = SubElement(result_elem, 'CommonPrefixes')
            SubElement(elem, 'Prefix').text = p

        body = tostring(result_elem, encoding_type=encoding_type)

        return HTTPOk(body=body, content_type='application/xml')

    @public
    @object_operation
    @check_container_existence
    def POST(self, req):
        """
        Handles Initiate Multipart Upload.
        """

        # Create a unique S3 upload id from UUID to avoid duplicates.
        upload_id = unique_id()

        container = req.container_name + MULTIUPLOAD_SUFFIX
        # Preserve the client's Content-Type in sysmeta so the completed
        # object can restore it; the marker object itself is a directory.
        content_type = req.headers.get('Content-Type')
        if content_type:
            req.headers[sysmeta_header('object', 'has-content-type')] = 'yes'
            req.headers[
                sysmeta_header('object', 'content-type')] = content_type
        else:
            req.headers[sysmeta_header('object', 'has-content-type')] = 'no'
            req.headers['Content-Type'] = 'application/directory'

        # Create the segments container lazily; it may already exist.
        try:
            req.get_response(self.app, 'PUT', container, '')
        except BucketAlreadyExists:
            pass

        obj = '%s/%s' % (req.object_name, upload_id)

        # The client's checksum headers describe the (absent) body, not
        # the empty marker object, so they must not be forwarded.
        req.headers.pop('Etag', None)
        req.headers.pop('Content-Md5', None)

        req.get_response(self.app, 'PUT', container, obj, body='')

        result_elem = Element('InitiateMultipartUploadResult')
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'Key').text = req.object_name
        SubElement(result_elem, 'UploadId').text = upload_id

        body = tostring(result_elem)

        return HTTPOk(body=body, content_type='application/xml')
||||
|
||||
|
||||
class UploadController(Controller):
    """
    Handles the following APIs:

    * List Parts
    * Abort Multipart Upload
    * Complete Multipart Upload

    Those APIs are logged as UPLOAD operations in the S3 server log.
    """
    @public
    @object_operation
    @check_container_existence
    def GET(self, req):
        """
        Handles List Parts.

        Lists the already-uploaded part objects for the upload named by
        the ``uploadId`` query parameter and renders them as a
        ``ListPartsResult`` XML document.
        """
        def filter_part_num_marker(o):
            # Keep only listing entries whose trailing path component is a
            # part number greater than the requested marker; entries whose
            # basename is not an integer are dropped.
            try:
                num = int(os.path.basename(o['name']))
                return num > part_num_marker
            except ValueError:
                return False

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        upload_id = req.params['uploadId']
        _check_upload_info(req, self.app, upload_id)

        maxparts = req.get_validated_param(
            'max-parts', DEFAULT_MAX_PARTS_LISTING,
            self.conf.max_parts_listing)
        part_num_marker = req.get_validated_param(
            'part-number-marker', 0)

        # Ask for one extra entry so truncation can be detected below.
        query = {
            'format': 'json',
            'limit': maxparts + 1,
            'prefix': '%s/%s/' % (req.object_name, upload_id),
            'delimiter': '/'
        }

        container = req.container_name + MULTIUPLOAD_SUFFIX
        resp = req.get_response(self.app, container=container, obj='',
                                query=query)
        objects = json.loads(resp.body)

        last_part = 0

        # If the caller requested a list starting at a specific part number,
        # construct a sub-set of the object list.
        objList = filter(filter_part_num_marker, objects)

        # pylint: disable-msg=E1103
        objList.sort(key=lambda o: int(o['name'].split('/')[-1]))

        if len(objList) > maxparts:
            objList = objList[:maxparts]
            truncated = True
        else:
            truncated = False
        # TODO: We have to retrieve object list again when truncated is True
        # and some objects filtered by invalid name because there could be no
        # enough objects for limit defined by maxparts.

        if objList:
            # NOTE: last_part becomes the string basename here (it was
            # initialised to the int 0); str() below handles either type.
            o = objList[-1]
            last_part = os.path.basename(o['name'])

        result_elem = Element('ListPartsResult')
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'Key').text = req.object_name
        SubElement(result_elem, 'UploadId').text = upload_id

        initiator_elem = SubElement(result_elem, 'Initiator')
        SubElement(initiator_elem, 'ID').text = req.user_id
        SubElement(initiator_elem, 'DisplayName').text = req.user_id
        owner_elem = SubElement(result_elem, 'Owner')
        SubElement(owner_elem, 'ID').text = req.user_id
        SubElement(owner_elem, 'DisplayName').text = req.user_id

        SubElement(result_elem, 'StorageClass').text = 'STANDARD'
        SubElement(result_elem, 'PartNumberMarker').text = str(part_num_marker)
        SubElement(result_elem, 'NextPartNumberMarker').text = str(last_part)
        SubElement(result_elem, 'MaxParts').text = str(maxparts)
        if 'encoding-type' in req.params:
            SubElement(result_elem, 'EncodingType').text = \
                req.params['encoding-type']
        SubElement(result_elem, 'IsTruncated').text = \
            'true' if truncated else 'false'

        for i in objList:
            part_elem = SubElement(result_elem, 'Part')
            SubElement(part_elem, 'PartNumber').text = i['name'].split('/')[-1]
            SubElement(part_elem, 'LastModified').text = \
                i['last_modified'][:-3] + 'Z'
            SubElement(part_elem, 'ETag').text = '"%s"' % i['hash']
            SubElement(part_elem, 'Size').text = str(i['bytes'])

        body = tostring(result_elem, encoding_type=encoding_type)

        return HTTPOk(body=body, content_type='application/xml')

    @public
    @object_operation
    @check_container_existence
    def DELETE(self, req):
        """
        Handles Abort Multipart Upload.

        Removes the upload marker and every uploaded segment for the
        upload named by the ``uploadId`` query parameter.
        """
        upload_id = req.params['uploadId']
        _check_upload_info(req, self.app, upload_id)

        # First check to see if this multi-part upload was already
        # completed. Look in the primary container, if the object exists,
        # then it was completed and we return an error here.
        container = req.container_name + MULTIUPLOAD_SUFFIX
        obj = '%s/%s' % (req.object_name, upload_id)
        # NOTE(review): no explicit method is passed, so this presumably
        # forwards the incoming DELETE, removing the upload marker (and
        # raising NoSuchKey if the upload does not exist) -- confirm
        # against S3Request.get_response's default-method behaviour.
        req.get_response(self.app, container=container, obj=obj)

        # The completed object was not found so this
        # must be a multipart upload abort.
        # We must delete any uploaded segments for this UploadID and then
        # delete the object in the main container as well
        query = {
            'format': 'json',
            'prefix': '%s/%s/' % (req.object_name, upload_id),
            'delimiter': '/',
        }

        resp = req.get_response(self.app, 'GET', container, '', query=query)

        # Iterate over the segment objects and delete them individually
        objects = json.loads(resp.body)
        for o in objects:
            container = req.container_name + MULTIUPLOAD_SUFFIX
            req.get_response(self.app, container=container, obj=o['name'])

        return HTTPNoContent()

    @public
    @object_operation
    @check_container_existence
    def POST(self, req):
        """
        Handles Complete Multipart Upload.

        Validates the client-supplied part list against the uploaded
        segments, writes an SLO manifest (or an empty object for a
        zero-part upload), deletes the upload marker, and returns a
        ``CompleteMultipartUploadResult`` document.
        """
        upload_id = req.params['uploadId']
        resp = _get_upload_info(req, self.app, upload_id)
        headers = {}
        # Carry the user metadata recorded on the upload marker over to
        # the final object.
        for key, val in resp.headers.iteritems():
            _key = key.lower()
            if _key.startswith('x-amz-meta-'):
                headers['x-object-meta-' + _key[11:]] = val

        hct_header = sysmeta_header('object', 'has-content-type')
        if resp.sysmeta_headers.get(hct_header) == 'yes':
            content_type = resp.sysmeta_headers.get(
                sysmeta_header('object', 'content-type'))
        elif hct_header in resp.sysmeta_headers:
            # has-content-type is present but false, so no content type was
            # set on initial upload. In that case, we won't set one on our
            # PUT request. Swift will end up guessing one based on the
            # object name.
            content_type = None
        else:
            content_type = resp.headers.get('Content-Type')

        if content_type:
            headers['Content-Type'] = content_type

        # Query for the objects in the segments area to make sure it completed
        query = {
            'format': 'json',
            'prefix': '%s/%s/' % (req.object_name, upload_id),
            'delimiter': '/'
        }

        container = req.container_name + MULTIUPLOAD_SUFFIX
        resp = req.get_response(self.app, 'GET', container, '', query=query)
        objinfo = json.loads(resp.body)
        # Map segment name -> SLO manifest entry for O(1) lookup while
        # walking the client's part list.
        objtable = dict((o['name'],
                         {'path': '/'.join(['', container, o['name']]),
                          'etag': o['hash'],
                          'size_bytes': o['bytes']}) for o in objinfo)

        manifest = []
        previous_number = 0
        try:
            xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
            if not xml:
                raise InvalidRequest(msg='You must specify at least one part')

            complete_elem = fromstring(
                xml, 'CompleteMultipartUpload', self.logger)
            for part_elem in complete_elem.iterchildren('Part'):
                part_number = int(part_elem.find('./PartNumber').text)

                # Parts must be listed in strictly ascending order.
                if part_number <= previous_number:
                    raise InvalidPartOrder(upload_id=upload_id)
                previous_number = part_number

                etag = part_elem.find('./ETag').text
                if len(etag) >= 2 and etag[0] == '"' and etag[-1] == '"':
                    # strip double quotes
                    etag = etag[1:-1]

                info = objtable.get("%s/%s/%s" % (req.object_name, upload_id,
                                                  part_number))
                if info is None or info['etag'] != etag:
                    raise InvalidPart(upload_id=upload_id,
                                      part_number=part_number)

                info['size_bytes'] = int(info['size_bytes'])
                manifest.append(info)
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedXML()
        except ErrorResponse:
            raise
        except Exception as e:
            # Log the unexpected failure but re-raise it unchanged, with
            # the original traceback (Python 2 three-expression raise).
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.error(e)
            raise exc_type, exc_value, exc_traceback

        # Check the size of each segment except the last and make sure they are
        # all more than the minimum upload chunk size
        for info in manifest[:-1]:
            if info['size_bytes'] < self.conf.min_segment_size:
                raise EntityTooSmall()

        try:
            # TODO: add support for versioning
            if manifest:
                resp = req.get_response(self.app, 'PUT',
                                        body=json.dumps(manifest),
                                        query={'multipart-manifest': 'put'},
                                        headers=headers)
            else:
                # the upload must have consisted of a single zero-length part
                # just write it directly
                resp = req.get_response(self.app, 'PUT', body='',
                                        headers=headers)
        except BadSwiftRequest as e:
            msg = str(e)
            expected_msg = 'too small; each segment must be at least 1 byte'
            if expected_msg in msg:
                # FIXME: AWS S3 allows a smaller object than 5 MB if there is
                # only one part. Use a COPY request to copy the part object
                # from the segments container instead.
                raise EntityTooSmall(msg)
            else:
                raise

        # clean up the multipart-upload record
        obj = '%s/%s' % (req.object_name, upload_id)
        try:
            req.get_response(self.app, 'DELETE', container, obj)
        except NoSuchKey:
            pass  # We know that this existed long enough for us to HEAD

        result_elem = Element('CompleteMultipartUploadResult')

        # NOTE: boto with sig v4 appends port to HTTP_HOST value at the
        # request header when the port is non default value and it makes
        # req.host_url like as http://localhost:8080:8080/path
        # that obviously invalid. Probably it should be resolved at
        # swift.common.swob though, tentatively we are parsing and
        # reconstructing the correct host_url info here.
        # in detail, https://github.com/boto/boto/pull/3513
        parsed_url = urlparse(req.host_url)
        host_url = '%s://%s' % (parsed_url.scheme, parsed_url.hostname)
        if parsed_url.port:
            host_url += ':%s' % parsed_url.port

        SubElement(result_elem, 'Location').text = host_url + req.path
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'Key').text = req.object_name
        SubElement(result_elem, 'ETag').text = resp.etag

        resp.body = tostring(result_elem)
        resp.status = 200
        resp.content_type = "application/xml"

        return resp
|
150
swift/common/middleware/s3api/controllers/obj.py
Normal file
150
swift/common/middleware/s3api/controllers/obj.py
Normal file
@ -0,0 +1,150 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from swift.common.http import HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_NO_CONTENT
|
||||
from swift.common.swob import Range, content_range_header_value
|
||||
from swift.common.utils import public
|
||||
|
||||
from swift.common.middleware.s3api.utils import S3Timestamp
|
||||
from swift.common.middleware.s3api.controllers.base import Controller
|
||||
from swift.common.middleware.s3api.s3response import S3NotImplemented, \
|
||||
InvalidRange, NoSuchKey, InvalidArgument
|
||||
|
||||
|
||||
class ObjectController(Controller):
    """
    Handles requests on objects
    """
    def _gen_head_range_resp(self, req_range, resp):
        """
        Swift doesn't handle Range header for HEAD requests.
        So, this method generates HEAD range response from HEAD response.
        S3 return HEAD range response, if the value of range satisfies the
        conditions which are described in the following document.
        - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35

        :param req_range: the raw Range header value from the client
        :param resp: the backend HEAD response to adjust in place
        :returns: ``resp``, with Content-Range/Content-Length/status set
                  when the range is satisfiable
        :raises InvalidRange: when the range cannot be satisfied
        """
        length = long(resp.headers.get('Content-Length'))

        try:
            content_range = Range(req_range)
        except ValueError:
            # An unparseable Range header is ignored, per RFC 2616.
            return resp

        ranges = content_range.ranges_for_length(length)
        if ranges == []:
            # Syntactically valid but unsatisfiable for this length.
            raise InvalidRange()
        elif ranges:
            if len(ranges) == 1:
                start, end = ranges[0]
                resp.headers['Content-Range'] = \
                    content_range_header_value(start, end, length)
                resp.headers['Content-Length'] = (end - start)
                resp.status = HTTP_PARTIAL_CONTENT
                return resp
            else:
                # TODO: It is necessary to confirm whether need to respond to
                #       multi-part response.(e.g. bytes=0-10,20-30)
                pass

        return resp

    def GETorHEAD(self, req):
        # Shared implementation for GET and HEAD Object.
        resp = req.get_response(self.app)

        if req.method == 'HEAD':
            # HEAD responses carry no body.
            resp.app_iter = None

        # S3 lets clients override selected response headers via
        # response-* query parameters.
        for key in ('content-type', 'content-language', 'expires',
                    'cache-control', 'content-disposition',
                    'content-encoding'):
            if 'response-' + key in req.params:
                resp.headers[key] = req.params['response-' + key]

        return resp

    @public
    def HEAD(self, req):
        """
        Handle HEAD Object request
        """
        resp = self.GETorHEAD(req)

        if 'range' in req.headers:
            req_range = req.headers['range']
            resp = self._gen_head_range_resp(req_range, resp)

        return resp

    @public
    def GET(self, req):
        """
        Handle GET Object request
        """
        return self.GETorHEAD(req)

    @public
    def PUT(self, req):
        """
        Handle PUT Object and PUT Object (Copy) request
        """
        # set X-Timestamp by s3api to use at copy resp body
        req_timestamp = S3Timestamp.now()
        req.headers['X-Timestamp'] = req_timestamp.internal
        # Copy-Source-Range is only valid on Upload Part Copy, not on a
        # whole-object copy.
        if all(h in req.headers
               for h in ('X-Amz-Copy-Source', 'X-Amz-Copy-Source-Range')):
            raise InvalidArgument('x-amz-copy-source-range',
                                  req.headers['X-Amz-Copy-Source-Range'],
                                  'Illegal copy header')
        req.check_copy_source(self.app)
        resp = req.get_response(self.app)

        if 'X-Amz-Copy-Source' in req.headers:
            resp.append_copy_resp_body(req.controller_name,
                                       req_timestamp.s3xmlformat)

            # delete object metadata from response
            for key in list(resp.headers.keys()):
                if key.startswith('x-amz-meta-'):
                    del resp.headers[key]

        resp.status = HTTP_OK
        return resp

    @public
    def POST(self, req):
        # S3 POST Object (browser form upload) is not supported.
        raise S3NotImplemented()

    @public
    def DELETE(self, req):
        """
        Handle DELETE Object request
        """
        try:
            # For multipart objects, delete the segments through the bulk
            # deleter as well.
            query = req.gen_multipart_manifest_delete_query(self.app)
            req.headers['Content-Type'] = None  # Ignore client content-type
            resp = req.get_response(self.app, query=query)
            if query and resp.status_int == HTTP_OK:
                for chunk in resp.app_iter:
                    pass  # drain the bulk-deleter response
                resp.status = HTTP_NO_CONTENT
                resp.body = ''
        except NoSuchKey:
            # expect to raise NoSuchBucket when the bucket doesn't exist
            exc_type, exc_value, exc_traceback = sys.exc_info()
            req.get_container_info(self.app)
            # Bucket exists, so re-raise the original NoSuchKey with its
            # traceback (Python 2 three-expression raise).
            raise exc_type, exc_value, exc_traceback
        return resp
|
67
swift/common/middleware/s3api/controllers/s3_acl.py
Normal file
67
swift/common/middleware/s3api/controllers/s3_acl.py
Normal file
@ -0,0 +1,67 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from urllib import quote
|
||||
from swift.common.utils import public
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk
|
||||
from swift.common.middleware.s3api.etree import tostring
|
||||
|
||||
|
||||
class S3AclController(Controller):
    """
    Handles the following APIs:

    * GET Bucket acl
    * PUT Bucket acl
    * GET Object acl
    * PUT Object acl

    Those APIs are logged as ACL operations in the S3 server log.
    """
    @public
    def GET(self, req):
        """
        Handles GET Bucket acl and GET Object acl.
        """
        # The backend response is only needed for the ACL it carries.
        backend_resp = req.get_response(self.app)
        if req.is_object_request:
            acl = backend_resp.object_acl
        else:
            acl = backend_resp.bucket_acl

        result = HTTPOk()
        result.body = tostring(acl.elem())
        return result

    @public
    def PUT(self, req):
        """
        Handles PUT Bucket acl and PUT Object acl.
        """
        if req.is_object_request:
            # Object sysmeta can only be updated by a copy (PUT with
            # X-Copy-From), not by POST, so copy the object onto itself
            # to persist the new ACL.
            source = '/%s/%s' % (req.container_name, req.object_name)
            req.get_response(self.app, 'PUT', headers={
                'X-Copy-From': quote(source),
                'Content-Length': 0,
            })
        else:
            # Bucket ACLs ride along as container metadata on a POST.
            req.get_response(self.app, 'POST')

        return HTTPOk()
|
68
swift/common/middleware/s3api/controllers/service.py
Normal file
68
swift/common/middleware/s3api/controllers/service.py
Normal file
@ -0,0 +1,68 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.utils import json, public
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller
|
||||
from swift.common.middleware.s3api.etree import Element, SubElement, tostring
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk, AccessDenied, \
|
||||
NoSuchBucket
|
||||
from swift.common.middleware.s3api.utils import validate_bucket_name
|
||||
|
||||
|
||||
class ServiceController(Controller):
    """
    Handles account level requests.
    """
    @public
    def GET(self, req):
        """
        Handle GET Service request
        """
        resp = req.get_response(self.app, query={'format': 'json'})

        # Only expose containers whose names are also valid S3 bucket names.
        listing = [entry for entry in json.loads(resp.body)
                   if validate_bucket_name(
                       entry['name'], self.conf.dns_compliant_bucket_names)]

        # we don't keep the creation time of a bucket (s3cmd doesn't
        # work without that) so we use something bogus.
        elem = Element('ListAllMyBucketsResult')

        owner = SubElement(elem, 'Owner')
        SubElement(owner, 'ID').text = req.user_id
        SubElement(owner, 'DisplayName').text = req.user_id

        buckets = SubElement(elem, 'Buckets')
        for entry in listing:
            if self.conf.s3_acl and self.conf.check_bucket_owner:
                # With ownership checks on, skip buckets this user may
                # not see (or that vanished since the account listing).
                try:
                    req.get_response(self.app, 'HEAD', entry['name'])
                except (AccessDenied, NoSuchBucket):
                    continue

            bucket = SubElement(buckets, 'Bucket')
            SubElement(bucket, 'Name').text = entry['name']
            SubElement(bucket, 'CreationDate').text = \
                '2009-02-03T16:45:09.000Z'

        return HTTPOk(content_type='application/xml', body=tostring(elem))
|
53
swift/common/middleware/s3api/controllers/versioning.py
Normal file
53
swift/common/middleware/s3api/controllers/versioning.py
Normal file
@ -0,0 +1,53 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.utils import public
|
||||
|
||||
from swift.common.middleware.s3api.controllers.base import Controller, \
|
||||
bucket_operation
|
||||
from swift.common.middleware.s3api.etree import Element, tostring
|
||||
from swift.common.middleware.s3api.s3response import HTTPOk, S3NotImplemented
|
||||
|
||||
|
||||
class VersioningController(Controller):
    """
    Handles the following APIs:

    * GET Bucket versioning
    * PUT Bucket versioning

    Those APIs are logged as VERSIONING operations in the S3 server log.
    """
    @public
    @bucket_operation
    def GET(self, req):
        """
        Handles GET Bucket versioning.
        """
        # Make sure the bucket exists (this raises if it does not)
        # before reporting a versioning status for it.
        req.get_response(self.app, method='HEAD')

        # Versioning is never enabled, so always return an empty
        # configuration document.
        # NOTE: served as text/plain even though the body is XML.
        empty_conf = Element('VersioningConfiguration')
        return HTTPOk(body=tostring(empty_conf),
                      content_type="text/plain")

    @public
    @bucket_operation
    def PUT(self, req):
        """
        Handles PUT Bucket versioning.
        """
        raise S3NotImplemented()
|
146
swift/common/middleware/s3api/etree.py
Normal file
146
swift/common/middleware/s3api/etree.py
Normal file
@ -0,0 +1,146 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import lxml.etree
|
||||
from urllib import quote
|
||||
from copy import deepcopy
|
||||
from pkg_resources import resource_stream # pylint: disable-msg=E0611
|
||||
import sys
|
||||
|
||||
from swift.common.utils import get_logger
|
||||
from swift.common.middleware.s3api.exception import S3Exception
|
||||
from swift.common.middleware.s3api.utils import camel_to_snake, \
|
||||
utf8encode, utf8decode
|
||||
|
||||
XMLNS_S3 = 'http://s3.amazonaws.com/doc/2006-03-01/'
|
||||
XMLNS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
|
||||
|
||||
|
||||
class XMLSyntaxError(S3Exception):
    """Raised in place of lxml's XMLSyntaxError for malformed XML input."""
    pass
|
||||
|
||||
|
||||
class DocumentInvalid(S3Exception):
    """Raised when a well-formed document fails RelaxNG schema validation."""
    pass
|
||||
|
||||
|
||||
def cleanup_namespaces(elem):
    """
    Strip the S3 namespace (and any default namespace) from the tag of
    *elem*, then recurse into its children so the whole tree carries
    bare tag names.
    """
    def strip_prefix(tag, ns):
        prefix = '{%s}' % ns
        if tag.startswith(prefix):
            return tag[len(prefix):]
        return tag

    if not isinstance(elem.tag, basestring):
        # Comment (and similar) nodes have a non-string tag; nothing to do.
        return

    # Drop the s3 namespace first, then whatever default namespace the
    # document itself declared.
    elem.tag = strip_prefix(elem.tag, XMLNS_S3)
    if elem.nsmap and None in elem.nsmap:
        elem.tag = strip_prefix(elem.tag, elem.nsmap[None])

    for child in elem.iterchildren():
        cleanup_namespaces(child)
|
||||
|
||||
|
||||
def fromstring(text, root_tag=None, logger=None):
    """
    Parse an XML string into an element tree.

    :param text: the XML document to parse
    :param root_tag: when given, the parsed document is validated against
                     the bundled RelaxNG schema named after this tag
    :param logger: optional logger for parse/validation diagnostics
    :returns: the root element, with S3/default namespaces stripped
    :raises XMLSyntaxError: if the text is not well-formed XML
    :raises DocumentInvalid: if the document fails schema validation
    """
    try:
        elem = lxml.etree.fromstring(text, parser)
    except lxml.etree.XMLSyntaxError as e:
        if logger:
            logger.debug(e)
        # Wrap the lxml exception in our own type.
        raise XMLSyntaxError(e)

    cleanup_namespaces(elem)

    if root_tag is not None:
        # validate XML
        try:
            path = 'schema/%s.rng' % camel_to_snake(root_tag)
            with resource_stream(__name__, path) as rng:
                lxml.etree.RelaxNG(file=rng).assertValid(elem)
        except IOError as e:
            # Probably, the schema file doesn't exist.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            logger = logger or get_logger({}, log_route='s3api')
            logger.error(e)
            # Re-raise the IOError with its original traceback
            # (Python 2 three-expression raise).
            raise exc_type, exc_value, exc_traceback
        except lxml.etree.DocumentInvalid as e:
            if logger:
                logger.debug(e)
            raise DocumentInvalid(e)

    return elem
|
||||
|
||||
|
||||
def tostring(tree, encoding_type=None, use_s3ns=True):
    """
    Serialize *tree* to UTF-8 XML with an XML declaration, optionally
    re-rooting the document under the S3 namespace and url-encoding the
    text of most elements when ``encoding_type`` is ``'url'``.
    """
    if use_s3ns:
        # Rebuild the root so the default namespace maps to the S3 one;
        # children are deep-copied into the replacement root.
        ns = tree.nsmap.copy()
        ns[None] = XMLNS_S3

        root = Element(tree.tag, attrib=tree.attrib, nsmap=ns)
        root.text = tree.text
        root.extend(deepcopy(tree.getchildren()))
        tree = root

    if encoding_type == 'url':
        # Some elements are not url-encoded even when we specify
        # encoding_type=url.
        exempt = ('LastModified', 'ID', 'DisplayName', 'Initiated')
        tree = deepcopy(tree)
        for node in tree.iter():
            if node.tag in exempt:
                continue
            if isinstance(node.text, basestring):
                node.text = quote(node.text)

    return lxml.etree.tostring(tree, xml_declaration=True, encoding='UTF-8')
|
||||
|
||||
|
||||
class _Element(lxml.etree.ElementBase):
    """
    Wrapper Element class of lxml.etree.Element to support
    a utf-8 encoded non-ascii string as a text.

    Why we need this?:
    Original lxml.etree.Element supports only unicode for the text.
    It declines maintainability because we have to call a lot of encode/decode
    methods to apply account/container/object name (i.e. PATH_INFO) to each
    Element instance. When using this class, we can remove such a redundant
    codes from swift.common.middleware.s3api middleware.
    """
    def __init__(self, *args, **kwargs):
        # pylint: disable-msg=E1002
        super(_Element, self).__init__(*args, **kwargs)

    @property
    def text(self):
        """
        utf-8 wrapper property of lxml.etree.Element.text
        """
        # Call the base-class descriptor directly and encode its unicode
        # result to utf-8 bytes.
        return utf8encode(lxml.etree.ElementBase.text.__get__(self))

    @text.setter
    def text(self, value):
        # Decode incoming utf-8 byte strings to unicode before handing
        # the value to lxml, which only accepts unicode text.
        lxml.etree.ElementBase.text.__set__(self, utf8decode(value))
|
||||
|
||||
|
||||
# Configure a module-level parser whose elements are the utf-8-aware
# _Element subclass, so every node it produces accepts byte-string text.
parser_lookup = lxml.etree.ElementDefaultClassLookup(element=_Element)
parser = lxml.etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)

# Factory callables used throughout s3api in place of lxml's defaults;
# Element is bound to the configured parser so new elements are _Element.
Element = parser.makeelement
SubElement = lxml.etree.SubElement
|
36
swift/common/middleware/s3api/exception.py
Normal file
36
swift/common/middleware/s3api/exception.py
Normal file
@ -0,0 +1,36 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class S3Exception(Exception):
    """Base class for every exception raised by the s3api middleware."""
    pass
|
||||
|
||||
|
||||
class NotS3Request(S3Exception):
    """Raised when an incoming request is not an S3 API request."""
    pass
|
||||
|
||||
|
||||
class BadSwiftRequest(S3Exception):
    """Raised when the backend Swift request fails as a bad request."""
    pass
|
||||
|
||||
|
||||
class ACLError(S3Exception):
    """Raised for errors while parsing or applying ACL information."""
    pass
|
||||
|
||||
|
||||
class InvalidSubresource(S3Exception):
    """Raised when a request targets an invalid sub-resource."""
    def __init__(self, resource, cause):
        # NOTE(review): resource is presumably the offending sub-resource
        # name and cause the underlying error -- confirm against callers.
        self.resource = resource
        self.cause = cause
|
280
swift/common/middleware/s3api/s3api.py
Normal file
280
swift/common/middleware/s3api/s3api.py
Normal file
@ -0,0 +1,280 @@
|
||||
# Copyright (c) 2010-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
The s3api middleware will emulate the S3 REST api on top of swift.
|
||||
|
||||
To enable this middleware to your configuration, add the s3api middleware
|
||||
in front of the auth middleware. See ``proxy-server.conf-sample`` for more
|
||||
detail and configurable options.
|
||||
|
||||
To set up your client, the access key will be the concatenation of the
|
||||
account and user strings that should look like test:tester, and the
|
||||
secret access key is the account password. The host should also point
|
||||
to the swift storage hostname.
|
||||
|
||||
An example client using the python boto library is as follows::
|
||||
|
||||
from boto.s3.connection import S3Connection
|
||||
connection = S3Connection(
|
||||
aws_access_key_id='test:tester',
|
||||
aws_secret_access_key='testing',
|
||||
port=8080,
|
||||
host='127.0.0.1',
|
||||
is_secure=False,
|
||||
calling_format=boto.s3.connection.OrdinaryCallingFormat())
|
||||
|
||||
----------
|
||||
Deployment
|
||||
----------
|
||||
|
||||
Proxy-Server Setting
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Set s3api before your auth in your pipeline in ``proxy-server.conf`` file.
|
||||
To enable all compatibility currently supported, you should make sure that
|
||||
bulk, slo, and your auth middleware are also included in your proxy
|
||||
pipeline setting.
|
||||
|
||||
Minimum example config is::
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = proxy-logging cache s3api tempauth bulk slo proxy-logging
|
||||
proxy-server
|
||||
|
||||
When using keystone, the config will be::
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = proxy-logging cache s3api s3token keystoneauth bulk slo
|
||||
proxy-logging proxy-server
|
||||
|
||||
.. note::
|
||||
``keystonemiddleware.authtoken`` can be located before/after s3api but
|
||||
we recommend to put it before s3api because when authtoken is after s3api,
|
||||
both authtoken and s3token will issue the acceptable token to keystone
|
||||
(i.e. authenticate twice).
|
||||
|
||||
Object-Server Setting
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To get better compatibility, you may add S3 supported headers (
|
||||
Cache-Control, Content-Language, Expires, and X-Robots-Tag), that are
|
||||
not supported in Swift by default, into allowed_headers option in
|
||||
``object-server.conf``. Please see ``object-server.conf`` for more detail.
|
||||
|
||||
-----------
|
||||
Constraints
|
||||
-----------
|
||||
Currently, the s3api is being ported from https://github.com/openstack/swift3
|
||||
so any existing issues in swift3 are still remaining. Please make sure
|
||||
descriptions in the example ``proxy-server.conf`` and what happens with the
|
||||
config, before enabling the options.
|
||||
|
||||
-------------
|
||||
Supported API
|
||||
-------------
|
||||
The compatibility will continue to be improved upstream, you can keep and
|
||||
eye on compatibility via a check tool build by SwiftStack. See
|
||||
https://github.com/swiftstack/s3compat in detail.
|
||||
|
||||
"""
|
||||
|
||||
from paste.deploy import loadwsgi
|
||||
|
||||
from swift.common.wsgi import PipelineWrapper, loadcontext
|
||||
|
||||
from swift.common.middleware.s3api.exception import NotS3Request, \
|
||||
InvalidSubresource
|
||||
from swift.common.middleware.s3api.s3request import get_request_class
|
||||
from swift.common.middleware.s3api.s3response import ErrorResponse, \
|
||||
InternalError, MethodNotAllowed, S3ResponseBase, S3NotImplemented
|
||||
from swift.common.utils import get_logger, register_swift_info, \
|
||||
config_true_value, config_positive_int_value
|
||||
from swift.common.middleware.s3api.utils import Config
|
||||
from swift.common.middleware.s3api.acl_handlers import get_acl_handler
|
||||
|
||||
|
||||
class S3ApiMiddleware(object):
    """S3Api: S3 compatibility middleware for Swift."""

    def __init__(self, app, conf, *args, **kwargs):
        """
        :param app: next WSGI application in the proxy pipeline
        :param conf: dict of middleware options from proxy-server.conf
        """
        self.app = app
        self.conf = Config()

        # Set default values if they are not configured
        self.conf.allow_no_owner = config_true_value(
            conf.get('allow_no_owner', False))
        self.conf.location = conf.get('location', 'US')
        self.conf.dns_compliant_bucket_names = config_true_value(
            conf.get('dns_compliant_bucket_names', True))
        self.conf.max_bucket_listing = config_positive_int_value(
            conf.get('max_bucket_listing', 1000))
        self.conf.max_parts_listing = config_positive_int_value(
            conf.get('max_parts_listing', 1000))
        self.conf.max_multi_delete_objects = config_positive_int_value(
            conf.get('max_multi_delete_objects', 1000))
        self.conf.s3_acl = config_true_value(
            conf.get('s3_acl', False))
        self.conf.storage_domain = conf.get('storage_domain', '')
        self.conf.auth_pipeline_check = config_true_value(
            conf.get('auth_pipeline_check', True))
        self.conf.max_upload_part_num = config_positive_int_value(
            conf.get('max_upload_part_num', 1000))
        self.conf.check_bucket_owner = config_true_value(
            conf.get('check_bucket_owner', False))
        self.conf.force_swift_request_proxy_log = config_true_value(
            conf.get('force_swift_request_proxy_log', False))
        self.conf.allow_multipart_uploads = config_true_value(
            conf.get('allow_multipart_uploads', True))
        self.conf.min_segment_size = config_positive_int_value(
            conf.get('min_segment_size', 5242880))

        self.logger = get_logger(
            conf, log_route=conf.get('log_name', 's3api'))
        # Multipart upload requires SLO; check_pipeline() may disable it
        # again if the SLO middleware is missing from the pipeline.
        self.slo_enabled = self.conf.allow_multipart_uploads
        self.check_pipeline(self.conf)

    def __call__(self, env, start_response):
        """WSGI entry point: handle S3 requests, pass through the rest."""
        try:
            req_class = get_request_class(env, self.conf.s3_acl)
            req = req_class(
                env, self.app, self.slo_enabled, self.conf.storage_domain,
                self.conf.location, self.conf.force_swift_request_proxy_log,
                self.conf.dns_compliant_bucket_names,
                self.conf.allow_multipart_uploads, self.conf.allow_no_owner)
            resp = self.handle_request(req)
        except NotS3Request:
            # Not an S3 request; hand the environ to the next app as-is.
            resp = self.app
        except InvalidSubresource as e:
            self.logger.debug(e.cause)
            # BUG FIX: ``resp`` was previously left unbound on this path,
            # so an InvalidSubresource surfaced as an UnboundLocalError
            # below instead of an S3-style error response.
            resp = InternalError(reason=str(e.cause))
        except ErrorResponse as err_resp:
            if isinstance(err_resp, InternalError):
                self.logger.exception(err_resp)
            resp = err_resp
        except Exception as e:
            self.logger.exception(e)
            resp = InternalError(reason=e)

        if isinstance(resp, S3ResponseBase) and 'swift.trans_id' in env:
            # Echo swift's transaction id in the headers AWS clients expect.
            resp.headers['x-amz-id-2'] = env['swift.trans_id']
            resp.headers['x-amz-request-id'] = env['swift.trans_id']

        return resp(env, start_response)

    def handle_request(self, req):
        """
        Route an S3 request to its controller method and return the
        controller's response.

        :raises MethodNotAllowed: if the controller has no publicly
                                  accessible handler for the HTTP method
        """
        self.logger.debug('Calling S3Api Middleware')
        self.logger.debug(req.__dict__)
        try:
            controller = req.controller(self.app, self.conf, self.logger)
        except S3NotImplemented:
            # TODO: Probably we should distinct the error to log this warning
            self.logger.warning('multipart: No SLO middleware in pipeline')
            raise

        acl_handler = get_acl_handler(req.controller_name)(req, self.logger)
        req.set_acl_handler(acl_handler)

        # Only controller methods explicitly marked publicly_accessible
        # may be dispatched to.
        handler = getattr(controller, req.method, None)
        if handler is None or not getattr(
                handler, 'publicly_accessible', False):
            raise MethodNotAllowed(req.method,
                                   req.controller.resource_type())

        return handler(req)

    def check_pipeline(self, conf):
        """
        Check that proxy-server.conf has an appropriate pipeline for s3api.
        """
        if conf.get('__file__', None) is None:
            # Not loaded from a config file (e.g. unit tests); nothing to do.
            return

        ctx = loadcontext(loadwsgi.APP, conf.__file__)
        pipeline = str(PipelineWrapper(ctx)).split(' ')

        # Add compatible with 3rd party middleware.
        self.check_filter_order(pipeline, ['s3api', 'proxy-server'])

        # Everything between s3api and proxy-server is treated as the
        # auth section of the pipeline.
        auth_pipeline = pipeline[pipeline.index('s3api') + 1:
                                 pipeline.index('proxy-server')]

        # Check SLO middleware
        if self.slo_enabled and 'slo' not in auth_pipeline:
            self.slo_enabled = False
            self.logger.warning('s3api middleware requires SLO middleware '
                                'to support multi-part upload, please add it '
                                'in pipeline')

        if not conf.auth_pipeline_check:
            self.logger.debug('Skip pipeline auth check.')
            return

        if 'tempauth' in auth_pipeline:
            self.logger.debug('Use tempauth middleware.')
        elif 'keystoneauth' in auth_pipeline:
            # With keystone, s3token must come before keystoneauth so the
            # token is translated before authorization runs.
            self.check_filter_order(
                auth_pipeline,
                ['s3token', 'keystoneauth'])
            self.logger.debug('Use keystone middleware.')
        elif len(auth_pipeline):
            self.logger.debug('Use third party(unknown) auth middleware.')
        else:
            raise ValueError('Invalid pipeline %r: expected auth between '
                             's3api and proxy-server ' % pipeline)

    def check_filter_order(self, pipeline, required_filters):
        """
        Check that required filters are present in order in the pipeline.

        :raises ValueError: if any required filter is missing or mis-ordered
        """
        indexes = []
        missing_filters = []
        for required_filter in required_filters:
            try:
                indexes.append(pipeline.index(required_filter))
            except ValueError as e:
                self.logger.debug(e)
                missing_filters.append(required_filter)

        if missing_filters:
            raise ValueError('Invalid pipeline %r: missing filters %r' % (
                pipeline, missing_filters))

        if indexes != sorted(indexes):
            raise ValueError('Invalid pipeline %r: expected filter %s' % (
                pipeline, ' before '.join(required_filters)))
|
||||
|
||||
|
||||
def filter_factory(global_conf, **local_conf):
    """Standard filter factory to use the middleware with paste.deploy"""
    conf = global_conf.copy()
    conf.update(local_conf)

    register_swift_info(
        's3api',
        # TODO: make default values as variables
        # Normalize configured values (which arrive from the config file
        # as strings) so /info reports the same types whether or not the
        # operator overrode the defaults; previously raw strings were
        # registered when an option was explicitly configured.
        max_bucket_listing=config_positive_int_value(
            conf.get('max_bucket_listing', 1000)),
        max_parts_listing=config_positive_int_value(
            conf.get('max_parts_listing', 1000)),
        max_upload_part_num=config_positive_int_value(
            conf.get('max_upload_part_num', 1000)),
        max_multi_delete_objects=config_positive_int_value(
            conf.get('max_multi_delete_objects', 1000)),
        allow_multipart_uploads=config_true_value(
            conf.get('allow_multipart_uploads', True)),
        min_segment_size=config_positive_int_value(
            conf.get('min_segment_size', 5242880)),
    )

    def s3api_filter(app):
        return S3ApiMiddleware(app, conf)

    return s3api_filter
|
1402
swift/common/middleware/s3api/s3request.py
Normal file
1402
swift/common/middleware/s3api/s3request.py
Normal file
File diff suppressed because it is too large
Load Diff
684
swift/common/middleware/s3api/s3response.py
Normal file
684
swift/common/middleware/s3api/s3response.py
Normal file
@ -0,0 +1,684 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
from UserDict import DictMixin
|
||||
from functools import partial
|
||||
|
||||
from swift.common import swob
|
||||
from swift.common.utils import config_true_value
|
||||
from swift.common.request_helpers import is_sys_meta
|
||||
|
||||
from swift.common.middleware.s3api.utils import snake_to_camel, sysmeta_prefix
|
||||
from swift.common.middleware.s3api.etree import Element, SubElement, tostring
|
||||
|
||||
|
||||
class HeaderKey(str):
    """
    A string subclass whose title() produces the header capitalization
    that S3 clients expect.
    """

    def title(self):
        lowered = self.lower()
        if lowered == 'etag':
            # AWS Java SDK expects exactly 'ETag', not 'Etag'.
            return 'ETag'
        if lowered.startswith('x-amz-'):
            # Amazon-specific headers come back from S3 all-lowercase.
            return lowered
        return str.title(self)
|
||||
|
||||
|
||||
class HeaderKeyDict(swob.HeaderKeyDict):
    """
    Like Swift's HeaderKeyDict, but every key is wrapped in HeaderKey so
    it is normalized the way S3 clients expect.
    """

    def __getitem__(self, key):
        return super(HeaderKeyDict, self).__getitem__(HeaderKey(key))

    def __setitem__(self, key, value):
        return super(HeaderKeyDict, self).__setitem__(HeaderKey(key), value)

    def __contains__(self, key):
        return super(HeaderKeyDict, self).__contains__(HeaderKey(key))

    def __delitem__(self, key):
        return super(HeaderKeyDict, self).__delitem__(HeaderKey(key))

    def get(self, key, default=None):
        return super(HeaderKeyDict, self).get(HeaderKey(key), default)

    def pop(self, key, default=None):
        return super(HeaderKeyDict, self).pop(HeaderKey(key), default)
|
||||
|
||||
|
||||
class S3ResponseBase(object):
    """Marker base class shared by every s3api response type."""
|
||||
|
||||
|
||||
class S3Response(S3ResponseBase, swob.Response):
    """
    Similar to the Response class in Swift, but uses our HeaderKeyDict for
    headers instead of Swift's HeaderKeyDict.  This also translates Swift
    specific headers to S3 headers.
    """
    def __init__(self, *args, **kwargs):
        swob.Response.__init__(self, *args, **kwargs)

        if self.etag:
            # Reassigning through the etag property setter adds the double
            # quotes S3 clients expect around the value (presumably swob's
            # setter quotes it — behavior kept as-is).
            self.etag = self.etag

        sw_sysmeta_headers = swob.HeaderKeyDict()
        sw_headers = swob.HeaderKeyDict()
        headers = HeaderKeyDict()
        # True when the response describes a static large object; set from
        # the x-static-large-object header below.
        self.is_slo = False

        def is_swift3_sysmeta(sysmeta_key, server_type):
            # Sysmeta written by the old standalone swift3 middleware.
            swift3_sysmeta_prefix = (
                'x-%s-sysmeta-swift3' % server_type).lower()
            return sysmeta_key.lower().startswith(swift3_sysmeta_prefix)

        def is_s3api_sysmeta(sysmeta_key, server_type):
            # BUG FIX: this previously read ``_server_type`` from the
            # enclosing loop instead of its own parameter; the call sites
            # happened to pass the same value, but use the parameter as
            # intended so the helper is self-contained.
            s3api_sysmeta_prefix = sysmeta_prefix(server_type).lower()
            return sysmeta_key.lower().startswith(s3api_sysmeta_prefix)

        # Split the Swift response headers into sysmeta and plain headers.
        for key, val in self.headers.iteritems():
            if is_sys_meta('object', key) or is_sys_meta('container', key):
                _server_type = key.split('-')[1]
                if is_swift3_sysmeta(key, _server_type):
                    # To be compatible with older swift3, translate swift3
                    # sysmeta to s3api sysmeta here
                    key = sysmeta_prefix(_server_type) + \
                        key[len('x-%s-sysmeta-swift3-' % _server_type):]

                    if key not in sw_sysmeta_headers:
                        # To avoid overwrite s3api sysmeta by older swift3
                        # sysmeta set the key only when the key does not exist
                        sw_sysmeta_headers[key] = val
                elif is_s3api_sysmeta(key, _server_type):
                    sw_sysmeta_headers[key] = val
            else:
                sw_headers[key] = val

        # Handle swift headers
        for key, val in sw_headers.iteritems():
            _key = key.lower()

            if _key.startswith('x-object-meta-'):
                # Note that AWS ignores user-defined headers with '=' in the
                # header name. We translated underscores to '=5F' on the way
                # in, though.
                headers['x-amz-meta-' + _key[14:].replace('=5f', '_')] = val
            elif _key in ('content-length', 'content-type',
                          'content-range', 'content-encoding',
                          'content-disposition', 'content-language',
                          'etag', 'last-modified', 'x-robots-tag',
                          'cache-control', 'expires'):
                headers[key] = val
            elif _key == 'x-static-large-object':
                # for delete slo
                self.is_slo = config_true_value(val)

        self.headers = headers
        # Used for pure swift header handling at the request layer
        self.sw_headers = sw_headers
        self.sysmeta_headers = sw_sysmeta_headers

    @classmethod
    def from_swift_resp(cls, sw_resp):
        """
        Create a new S3 response object based on the given Swift response.
        """
        if sw_resp.app_iter:
            # Stream the body rather than buffering it.
            body = None
            app_iter = sw_resp.app_iter
        else:
            body = sw_resp.body
            app_iter = None

        resp = cls(status=sw_resp.status, headers=sw_resp.headers,
                   request=sw_resp.request, body=body, app_iter=app_iter,
                   conditional_response=sw_resp.conditional_response)
        resp.environ.update(sw_resp.environ)

        return resp

    def append_copy_resp_body(self, controller_name, last_modified):
        """
        Replace the body with the Copy<Type>Result XML document that S3
        returns for copy operations.
        """
        elem = Element('Copy%sResult' % controller_name)
        SubElement(elem, 'LastModified').text = last_modified
        SubElement(elem, 'ETag').text = '"%s"' % self.etag
        self.headers['Content-Type'] = 'application/xml'
        self.body = tostring(elem)
        # The ETag now lives in the XML body, not the response header.
        self.etag = None
|
||||
|
||||
|
||||
# Convenience constructors for successful S3 responses: each fixes the
# HTTP status code and otherwise behaves exactly like S3Response.
HTTPOk = partial(S3Response, status=200)
HTTPCreated = partial(S3Response, status=201)
HTTPAccepted = partial(S3Response, status=202)
HTTPNoContent = partial(S3Response, status=204)
HTTPPartialContent = partial(S3Response, status=206)
|
||||
|
||||
|
||||
class ErrorResponse(S3ResponseBase, swob.HTTPException):
    """
    S3 error object.

    Reference information about S3 errors is available at:
    http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
    """
    _status = ''
    _msg = ''
    _code = ''

    def __init__(self, msg=None, *args, **kwargs):
        if msg:
            self._msg = msg
        if not self._code:
            # Default the S3 error code to the subclass name, matching
            # AWS's convention (e.g. NoSuchBucket).
            self._code = self.__class__.__name__

        # Keep extra keyword details so they can be rendered into the XML
        # error body; 'headers' and 'body' belong to swob, not the body.
        self.info = kwargs.copy()
        for reserved_key in ('headers', 'body'):
            if self.info.get(reserved_key):
                del self.info[reserved_key]

        swob.HTTPException.__init__(self, status=self._status,
                                    app_iter=self._body_iter(),
                                    content_type='application/xml', *args,
                                    **kwargs)
        self.headers = HeaderKeyDict(self.headers)

    def _body_iter(self):
        """Lazily render the S3-style XML error document."""
        error_elem = Element('Error')
        SubElement(error_elem, 'Code').text = self._code
        SubElement(error_elem, 'Message').text = self._msg
        if 'swift.trans_id' in self.environ:
            request_id = self.environ['swift.trans_id']
            SubElement(error_elem, 'RequestId').text = request_id

        self._dict_to_etree(error_elem, self.info)

        yield tostring(error_elem, use_s3ns=False)

    def _dict_to_etree(self, parent, d):
        """Recursively serialize *d* as child elements of *parent*."""
        for key, value in d.items():
            # Raw string: '\W' is an invalid escape sequence outside raw
            # literals and a warning/error on modern Pythons.
            tag = re.sub(r'\W', '', snake_to_camel(key))
            elem = SubElement(parent, tag)

            if isinstance(value, (dict, DictMixin)):
                self._dict_to_etree(elem, value)
            else:
                try:
                    elem.text = str(value)
                except ValueError:
                    # We set an invalid string for XML.
                    elem.text = '(invalid string)'
|
||||
|
||||
|
||||
class AccessDenied(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'Access Denied.'
|
||||
|
||||
|
||||
class AccountProblem(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'There is a problem with your AWS account that prevents the ' \
|
||||
'operation from completing successfully.'
|
||||
|
||||
|
||||
class AmbiguousGrantByEmailAddress(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The e-mail address you provided is associated with more than ' \
|
||||
'one account.'
|
||||
|
||||
|
||||
class AuthorizationHeaderMalformed(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The authorization header is malformed; the authorization ' \
|
||||
'header requires three components: Credential, SignedHeaders, ' \
|
||||
'and Signature.'
|
||||
|
||||
|
||||
class AuthorizationQueryParametersError(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
|
||||
|
||||
class BadDigest(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The Content-MD5 you specified did not match what we received.'
|
||||
|
||||
|
||||
class BucketAlreadyExists(ErrorResponse):
|
||||
_status = '409 Conflict'
|
||||
_msg = 'The requested bucket name is not available. The bucket ' \
|
||||
'namespace is shared by all users of the system. Please select a ' \
|
||||
'different name and try again.'
|
||||
|
||||
def __init__(self, bucket, msg=None, *args, **kwargs):
|
||||
ErrorResponse.__init__(self, msg, bucket_name=bucket, *args, **kwargs)
|
||||
|
||||
|
||||
class BucketAlreadyOwnedByYou(ErrorResponse):
|
||||
_status = '409 Conflict'
|
||||
_msg = 'Your previous request to create the named bucket succeeded and ' \
|
||||
'you already own it.'
|
||||
|
||||
def __init__(self, bucket, msg=None, *args, **kwargs):
|
||||
ErrorResponse.__init__(self, msg, bucket_name=bucket, *args, **kwargs)
|
||||
|
||||
|
||||
class BucketNotEmpty(ErrorResponse):
|
||||
_status = '409 Conflict'
|
||||
_msg = 'The bucket you tried to delete is not empty'
|
||||
|
||||
|
||||
class CredentialsNotSupported(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'This request does not support credentials.'
|
||||
|
||||
|
||||
class CrossLocationLoggingProhibited(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'Cross location logging not allowed. Buckets in one geographic ' \
|
||||
'location cannot log information to a bucket in another location.'
|
||||
|
||||
|
||||
class EntityTooSmall(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your proposed upload is smaller than the minimum allowed object ' \
|
||||
'size.'
|
||||
|
||||
|
||||
class EntityTooLarge(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your proposed upload exceeds the maximum allowed object size.'
|
||||
|
||||
|
||||
class ExpiredToken(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The provided token has expired.'
|
||||
|
||||
|
||||
class IllegalVersioningConfigurationException(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The Versioning configuration specified in the request is invalid.'
|
||||
|
||||
|
||||
class IncompleteBody(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'You did not provide the number of bytes specified by the ' \
|
||||
'Content-Length HTTP header.'
|
||||
|
||||
|
||||
class IncorrectNumberOfFilesInPostRequest(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'POST requires exactly one file upload per request.'
|
||||
|
||||
|
||||
class InlineDataTooLarge(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Inline data exceeds the maximum allowed size.'
|
||||
|
||||
|
||||
class InternalError(ErrorResponse):
|
||||
_status = '500 Internal Server Error'
|
||||
_msg = 'We encountered an internal error. Please try again.'
|
||||
|
||||
|
||||
class InvalidAccessKeyId(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'The AWS Access Key Id you provided does not exist in our records.'
|
||||
|
||||
|
||||
class InvalidArgument(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Invalid Argument.'
|
||||
|
||||
def __init__(self, name, value, msg=None, *args, **kwargs):
|
||||
ErrorResponse.__init__(self, msg, argument_name=name,
|
||||
argument_value=value, *args, **kwargs)
|
||||
|
||||
|
||||
class InvalidBucketName(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The specified bucket is not valid.'
|
||||
|
||||
def __init__(self, bucket, msg=None, *args, **kwargs):
|
||||
ErrorResponse.__init__(self, msg, bucket_name=bucket, *args, **kwargs)
|
||||
|
||||
|
||||
class InvalidBucketState(ErrorResponse):
|
||||
_status = '409 Conflict'
|
||||
_msg = 'The request is not valid with the current state of the bucket.'
|
||||
|
||||
|
||||
class InvalidDigest(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The Content-MD5 you specified was an invalid.'
|
||||
|
||||
|
||||
class InvalidLocationConstraint(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The specified location constraint is not valid.'
|
||||
|
||||
|
||||
class InvalidObjectState(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'The operation is not valid for the current state of the object.'
|
||||
|
||||
|
||||
class InvalidPart(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'One or more of the specified parts could not be found. The part ' \
|
||||
'might not have been uploaded, or the specified entity tag might ' \
|
||||
'not have matched the part\'s entity tag.'
|
||||
|
||||
|
||||
class InvalidPartOrder(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The list of parts was not in ascending order.Parts list must ' \
|
||||
'specified in order by part number.'
|
||||
|
||||
|
||||
class InvalidPayer(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'All access to this object has been disabled.'
|
||||
|
||||
|
||||
class InvalidPolicyDocument(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The content of the form does not meet the conditions specified ' \
|
||||
'in the policy document.'
|
||||
|
||||
|
||||
class InvalidRange(ErrorResponse):
|
||||
_status = '416 Requested Range Not Satisfiable'
|
||||
_msg = 'The requested range cannot be satisfied.'
|
||||
|
||||
|
||||
class InvalidRequest(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Invalid Request.'
|
||||
|
||||
|
||||
class InvalidSecurity(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'The provided security credentials are not valid.'
|
||||
|
||||
|
||||
class InvalidSOAPRequest(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The SOAP request body is invalid.'
|
||||
|
||||
|
||||
class InvalidStorageClass(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The storage class you specified is not valid.'
|
||||
|
||||
|
||||
class InvalidTargetBucketForLogging(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The target bucket for logging does not exist, is not owned by ' \
|
||||
'you, or does not have the appropriate grants for the ' \
|
||||
'log-delivery group.'
|
||||
|
||||
def __init__(self, bucket, msg=None, *args, **kwargs):
|
||||
ErrorResponse.__init__(self, msg, target_bucket=bucket, *args,
|
||||
**kwargs)
|
||||
|
||||
|
||||
class InvalidToken(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The provided token is malformed or otherwise invalid.'
|
||||
|
||||
|
||||
class InvalidURI(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Couldn\'t parse the specified URI.'
|
||||
|
||||
def __init__(self, uri, msg=None, *args, **kwargs):
|
||||
ErrorResponse.__init__(self, msg, uri=uri, *args, **kwargs)
|
||||
|
||||
|
||||
class KeyTooLong(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your key is too long.'
|
||||
|
||||
|
||||
class MalformedACLError(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The XML you provided was not well-formed or did not validate ' \
|
||||
'against our published schema.'
|
||||
|
||||
|
||||
class MalformedPOSTRequest(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The body of your POST request is not well-formed ' \
|
||||
'multipart/form-data.'
|
||||
|
||||
|
||||
class MalformedXML(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The XML you provided was not well-formed or did not validate ' \
|
||||
'against our published schema.'
|
||||
|
||||
|
||||
class MaxMessageLengthExceeded(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your request was too big.'
|
||||
|
||||
|
||||
class MaxPostPreDataLengthExceededError(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your POST request fields preceding the upload file were too large.'
|
||||
|
||||
|
||||
class MetadataTooLarge(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your metadata headers exceed the maximum allowed metadata size.'
|
||||
|
||||
|
||||
class MethodNotAllowed(ErrorResponse):
|
||||
_status = '405 Method Not Allowed'
|
||||
_msg = 'The specified method is not allowed against this resource.'
|
||||
|
||||
def __init__(self, method, resource_type, msg=None, *args, **kwargs):
|
||||
ErrorResponse.__init__(self, msg, method=method,
|
||||
resource_type=resource_type, *args, **kwargs)
|
||||
|
||||
|
||||
class MissingContentLength(ErrorResponse):
|
||||
_status = '411 Length Required'
|
||||
_msg = 'You must provide the Content-Length HTTP header.'
|
||||
|
||||
|
||||
class MissingRequestBodyError(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Request body is empty.'
|
||||
|
||||
|
||||
class MissingSecurityElement(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The SOAP 1.1 request is missing a security element.'
|
||||
|
||||
|
||||
class MissingSecurityHeader(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your request was missing a required header.'
|
||||
|
||||
|
||||
class NoLoggingStatusForKey(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'There is no such thing as a logging status sub-resource for a key.'
|
||||
|
||||
|
||||
class NoSuchBucket(ErrorResponse):
    # 404 error for requests against a bucket that does not exist; the
    # offending bucket name is echoed in the XML error body.
    _status = '404 Not Found'
    _msg = 'The specified bucket does not exist.'

    def __init__(self, bucket, msg=None, *args, **kwargs):
        if not bucket:
            # An empty bucket name here is a programming error in the
            # caller, not a client error, so report a 500 instead.
            raise InternalError()
        ErrorResponse.__init__(self, msg, bucket_name=bucket, *args, **kwargs)
|
||||
|
||||
|
||||
class NoSuchKey(ErrorResponse):
    # 404 error for requests against an object key that does not exist;
    # the offending key is echoed in the XML error body.
    _status = '404 Not Found'
    _msg = 'The specified key does not exist.'

    def __init__(self, key, msg=None, *args, **kwargs):
        if not key:
            # An empty key here is a programming error in the caller,
            # not a client error, so report a 500 instead.
            raise InternalError()
        ErrorResponse.__init__(self, msg, key=key, *args, **kwargs)
|
||||
|
||||
|
||||
class NoSuchLifecycleConfiguration(ErrorResponse):
|
||||
_status = '404 Not Found'
|
||||
_msg = 'The lifecycle configuration does not exist. .'
|
||||
|
||||
|
||||
class NoSuchUpload(ErrorResponse):
|
||||
_status = '404 Not Found'
|
||||
_msg = 'The specified multipart upload does not exist. The upload ID ' \
|
||||
'might be invalid, or the multipart upload might have been ' \
|
||||
'aborted or completed.'
|
||||
|
||||
|
||||
class NoSuchVersion(ErrorResponse):
|
||||
_status = '404 Not Found'
|
||||
_msg = 'The specified version does not exist.'
|
||||
|
||||
def __init__(self, key, version_id, msg=None, *args, **kwargs):
|
||||
if not key:
|
||||
raise InternalError()
|
||||
ErrorResponse.__init__(self, msg, key=key, version_id=version_id,
|
||||
*args, **kwargs)
|
||||
|
||||
|
||||
# NotImplemented is a python built-in constant. Use S3NotImplemented instead.
class S3NotImplemented(ErrorResponse):
    # 501 error for S3 features s3api does not support.
    _status = '501 Not Implemented'
    _msg = 'Not implemented.'
    # Set explicitly because the class could not be named 'NotImplemented';
    # the wire-level code must still read 'NotImplemented'.
    _code = 'NotImplemented'
|
||||
|
||||
|
||||
class NotSignedUp(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'Your account is not signed up for the Amazon S3 service.'
|
||||
|
||||
|
||||
class NotSuchBucketPolicy(ErrorResponse):
|
||||
_status = '404 Not Found'
|
||||
_msg = 'The specified bucket does not have a bucket policy.'
|
||||
|
||||
|
||||
class OperationAborted(ErrorResponse):
|
||||
_status = '409 Conflict'
|
||||
_msg = 'A conflicting conditional operation is currently in progress ' \
|
||||
'against this resource. Please try again.'
|
||||
|
||||
|
||||
class PermanentRedirect(ErrorResponse):
|
||||
_status = '301 Moved Permanently'
|
||||
_msg = 'The bucket you are attempting to access must be addressed using ' \
|
||||
'the specified endpoint. Please send all future requests to this ' \
|
||||
'endpoint.'
|
||||
|
||||
|
||||
class PreconditionFailed(ErrorResponse):
|
||||
_status = '412 Precondition Failed'
|
||||
_msg = 'At least one of the preconditions you specified did not hold.'
|
||||
|
||||
|
||||
class Redirect(ErrorResponse):
|
||||
_status = '307 Moved Temporarily'
|
||||
_msg = 'Temporary redirect.'
|
||||
|
||||
|
||||
class RestoreAlreadyInProgress(ErrorResponse):
|
||||
_status = '409 Conflict'
|
||||
_msg = 'Object restore is already in progress.'
|
||||
|
||||
|
||||
class RequestIsNotMultiPartContent(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Bucket POST must be of the enclosure-type multipart/form-data.'
|
||||
|
||||
|
||||
class RequestTimeout(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Your socket connection to the server was not read from or ' \
|
||||
'written to within the timeout period.'
|
||||
|
||||
|
||||
class RequestTimeTooSkewed(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'The difference between the request time and the current time ' \
|
||||
'is too large.'
|
||||
|
||||
|
||||
class RequestTorrentOfBucketError(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'Requesting the torrent file of a bucket is not permitted.'
|
||||
|
||||
|
||||
class SignatureDoesNotMatch(ErrorResponse):
|
||||
_status = '403 Forbidden'
|
||||
_msg = 'The request signature we calculated does not match the ' \
|
||||
'signature you provided. Check your key and signing method.'
|
||||
|
||||
|
||||
class ServiceUnavailable(ErrorResponse):
|
||||
_status = '503 Service Unavailable'
|
||||
_msg = 'Please reduce your request rate.'
|
||||
|
||||
|
||||
class SlowDown(ErrorResponse):
|
||||
_status = '503 Slow Down'
|
||||
_msg = 'Please reduce your request rate.'
|
||||
|
||||
|
||||
class TemporaryRedirect(ErrorResponse):
|
||||
_status = '307 Moved Temporarily'
|
||||
_msg = 'You are being redirected to the bucket while DNS updates.'
|
||||
|
||||
|
||||
class TokenRefreshRequired(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The provided token must be refreshed.'
|
||||
|
||||
|
||||
class TooManyBuckets(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'You have attempted to create more buckets than allowed.'
|
||||
|
||||
|
||||
class UnexpectedContent(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'This request does not support content.'
|
||||
|
||||
|
||||
class UnresolvableGrantByEmailAddress(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The e-mail address you provided does not match any account on ' \
|
||||
'record.'
|
||||
|
||||
|
||||
class UserKeyMustBeSpecified(ErrorResponse):
|
||||
_status = '400 Bad Request'
|
||||
_msg = 'The bucket POST must contain the specified field name. If it is ' \
|
||||
'specified, please check the order of the fields.'
|
324
swift/common/middleware/s3api/s3token.py
Normal file
324
swift/common/middleware/s3api/s3token.py
Normal file
@ -0,0 +1,324 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011,2012 Akira YOSHIYAMA <akirayoshiyama@gmail.com>
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# This source code is based ./auth_token.py and ./ec2_token.py.
|
||||
# See them for their copyright.
|
||||
|
||||
"""
|
||||
-------------------
|
||||
S3 Token Middleware
|
||||
-------------------
|
||||
s3token middleware is for authentication with s3api + keystone.
|
||||
This middleware:
|
||||
|
||||
* Gets a request from the s3api middleware with an S3 Authorization
|
||||
access key.
|
||||
* Validates s3 token with Keystone.
|
||||
* Transforms the account name to AUTH_%(tenant_name).
|
||||
|
||||
"""
|
||||
|
||||
import base64
|
||||
import json
|
||||
|
||||
import requests
|
||||
import six
|
||||
from six.moves import urllib
|
||||
|
||||
from swift.common.swob import Request, HTTPBadRequest, HTTPUnauthorized, \
|
||||
HTTPException
|
||||
from swift.common.utils import config_true_value, split_path, get_logger
|
||||
from swift.common.wsgi import ConfigFileError
|
||||
|
||||
|
||||
PROTOCOL_NAME = 'S3 Token Authentication'

# Identity headers that must not be trusted if supplied by the client.
# When s3token is the first auth middleware in the pipeline it blanks all
# of these so that only values set by Keystone validation survive.
KEYSTONE_AUTH_HEADERS = (
    'X-Identity-Status', 'X-Service-Identity-Status',
    'X-Domain-Id', 'X-Service-Domain-Id',
    'X-Domain-Name', 'X-Service-Domain-Name',
    'X-Project-Id', 'X-Service-Project-Id',
    'X-Project-Name', 'X-Service-Project-Name',
    'X-Project-Domain-Id', 'X-Service-Project-Domain-Id',
    'X-Project-Domain-Name', 'X-Service-Project-Domain-Name',
    'X-User-Id', 'X-Service-User-Id',
    'X-User-Name', 'X-Service-User-Name',
    'X-User-Domain-Id', 'X-Service-User-Domain-Id',
    'X-User-Domain-Name', 'X-Service-User-Domain-Name',
    'X-Roles', 'X-Service-Roles',
    'X-Is-Admin-Project',
    'X-Service-Catalog',
    # Deprecated headers, too...
    'X-Tenant-Id',
    'X-Tenant-Name',
    'X-Tenant',
    'X-User',
    'X-Role',
)
|
||||
|
||||
|
||||
def parse_v2_response(token):
    """Extract auth data from a Keystone v2 validation response.

    Returns a (headers, token_id, tenant) tuple where *headers* are the
    Keystone identity headers to set on the request, *token_id* is the
    validated token's id (may be None), and *tenant* is the tenant dict.
    """
    access = token['access']
    user = access['user']
    tenant = access['token']['tenant']
    role_names = [role['name'] for role in user['roles']]
    headers = {
        'X-Identity-Status': 'Confirmed',
        'X-Roles': ','.join(role_names),
        'X-User-Id': user['id'],
        'X-User-Name': user['name'],
        'X-Tenant-Id': tenant['id'],
        'X-Tenant-Name': tenant['name'],
        'X-Project-Id': tenant['id'],
        'X-Project-Name': tenant['name'],
    }
    return headers, access['token'].get('id'), tenant
|
||||
|
||||
|
||||
def parse_v3_response(token):
    """Extract auth data from a Keystone v3 validation response.

    Returns a (headers, token_id, project) tuple. v3 responses carry no
    separate token id in the body, so token_id is always None.
    """
    body = token['token']
    user = body['user']
    project = body['project']
    headers = {
        'X-Identity-Status': 'Confirmed',
        'X-Roles': ','.join(role['name'] for role in body['roles']),
        'X-User-Id': user['id'],
        'X-User-Name': user['name'],
        'X-User-Domain-Id': user['domain']['id'],
        'X-User-Domain-Name': user['domain']['name'],
        'X-Tenant-Id': project['id'],
        'X-Tenant-Name': project['name'],
        'X-Project-Id': project['id'],
        'X-Project-Name': project['name'],
        'X-Project-Domain-Id': project['domain']['id'],
        'X-Project-Domain-Name': project['domain']['name'],
    }
    return headers, None, project
|
||||
|
||||
|
||||
class S3Token(object):
    """Middleware that handles S3 authentication.

    Receives the S3 credentials extracted by the s3api middleware
    (``s3api.auth_details`` in the WSGI environ), validates them against
    Keystone's ``/s3tokens`` endpoint, sets the usual Keystone identity
    headers, and rewrites the account in PATH_INFO to
    ``%(reseller_prefix)s%(tenant_id)s``.
    """

    def __init__(self, app, conf):
        """Common initialization code.

        :param app: next WSGI app/filter in the pipeline
        :param conf: filter configuration dict
        :raises ValueError: if http_timeout is out of range
        :raises ConfigFileError: if auth_uri is missing or malformed
        """
        self._app = app
        self._logger = get_logger(
            conf, log_route=conf.get('log_name', 's3token'))
        self._logger.debug('Starting the %s component', PROTOCOL_NAME)
        self._timeout = float(conf.get('http_timeout', '10.0'))
        if not (0 < self._timeout <= 60):
            raise ValueError('http_timeout must be between 0 and 60 seconds')
        self._reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        self._delay_auth_decision = config_true_value(
            conf.get('delay_auth_decision'))

        # where to find the auth service (we use this to validate tokens)
        self._request_uri = conf.get('auth_uri', '').rstrip('/') + '/s3tokens'
        parsed = urllib.parse.urlsplit(self._request_uri)
        if not parsed.scheme or not parsed.hostname:
            raise ConfigFileError(
                'Invalid auth_uri; must include scheme and host')
        if parsed.scheme not in ('http', 'https'):
            raise ConfigFileError(
                'Invalid auth_uri; scheme must be http or https')
        if parsed.query or parsed.fragment or '@' in parsed.netloc:
            raise ConfigFileError('Invalid auth_uri; must not include '
                                  'username, query, or fragment')

        # SSL settings are passed straight through to requests.post():
        # insecure disables server-cert verification; otherwise an optional
        # client cert (with or without separate key) may be configured.
        insecure = config_true_value(conf.get('insecure'))
        cert_file = conf.get('certfile')
        key_file = conf.get('keyfile')

        if insecure:
            self._verify = False
        elif cert_file and key_file:
            self._verify = (cert_file, key_file)
        elif cert_file:
            self._verify = cert_file
        else:
            self._verify = None

    def _deny_request(self, code):
        """Return a swob error response with an S3-style XML error body.

        :param code: one of 'AccessDenied' or 'InvalidURI'
        """
        error_cls, message = {
            'AccessDenied': (HTTPUnauthorized, 'Access denied'),
            'InvalidURI': (HTTPBadRequest,
                           'Could not parse the specified URI'),
        }[code]
        resp = error_cls(content_type='text/xml')
        error_msg = ('<?xml version="1.0" encoding="UTF-8"?>\r\n'
                     '<Error>\r\n <Code>%s</Code>\r\n '
                     '<Message>%s</Message>\r\n</Error>\r\n' %
                     (code, message))
        if six.PY3:
            # swob bodies must be bytes on py3
            error_msg = error_msg.encode()
        resp.body = error_msg
        return resp

    def _json_request(self, creds_json):
        """POST the S3 credentials to Keystone for validation.

        :param creds_json: JSON-encoded credentials payload
        :returns: the (2xx) requests.Response from Keystone
        :raises HTTPException: a swob error response (from _deny_request)
            on connection failure or a non-2xx Keystone reply
        """
        headers = {'Content-Type': 'application/json'}
        try:
            response = requests.post(self._request_uri,
                                     headers=headers, data=creds_json,
                                     verify=self._verify,
                                     timeout=self._timeout)
        except requests.exceptions.RequestException as e:
            self._logger.info('HTTP connection exception: %s', e)
            raise self._deny_request('InvalidURI')

        if response.status_code < 200 or response.status_code >= 300:
            self._logger.debug('Keystone reply error: status=%s reason=%s',
                               response.status_code, response.reason)
            raise self._deny_request('AccessDenied')

        return response

    def __call__(self, environ, start_response):
        """Handle incoming request. authenticate and send downstream."""
        req = Request(environ)
        self._logger.debug('Calling S3Token middleware.')

        # Always drop auth headers if we're first in the pipeline
        if 'keystone.token_info' not in req.environ:
            req.headers.update({h: None for h in KEYSTONE_AUTH_HEADERS})

        try:
            parts = split_path(req.path, 1, 4, True)
            version, account, container, obj = parts
        except ValueError:
            msg = 'Not a path query: %s, skipping.' % req.path
            self._logger.debug(msg)
            return self._app(environ, start_response)

        # Read request signature and access id.
        s3_auth_details = req.environ.get('s3api.auth_details')
        if not s3_auth_details:
            msg = 'No authorization details from s3api. skipping.'
            self._logger.debug(msg)
            return self._app(environ, start_response)

        access = s3_auth_details['access_key']
        if isinstance(access, six.binary_type):
            access = access.decode('utf-8')

        signature = s3_auth_details['signature']
        if isinstance(signature, six.binary_type):
            signature = signature.decode('utf-8')

        string_to_sign = s3_auth_details['string_to_sign']
        if isinstance(string_to_sign, six.text_type):
            string_to_sign = string_to_sign.encode('utf-8')
        # BUG FIX: urlsafe_b64encode() returns bytes, and bytes has no
        # .encode() on py3 (the original ``.encode('ascii')`` raised
        # AttributeError there and the raw bytes would not be JSON
        # serializable anyway). Decode to a native text string instead;
        # on py2 this yields the identical ascii value as unicode.
        token = base64.urlsafe_b64encode(string_to_sign).decode('ascii')

        # NOTE(chmou): This is to handle the special case with nova
        # when we have the option s3_affix_tenant. We will force it to
        # connect to another account than the one
        # authenticated. Before people start getting worried about
        # security, I should point that we are connecting with
        # username/token specified by the user but instead of
        # connecting to its own account we will force it to go to an
        # another account. In a normal scenario if that user don't
        # have the reseller right it will just fail but since the
        # reseller account can connect to every account it is allowed
        # by the swift_auth middleware.
        force_tenant = None
        if ':' in access:
            access, force_tenant = access.split(':')

        # Authenticate request.
        creds = {'credentials': {'access': access,
                                 'token': token,
                                 'signature': signature}}
        creds_json = json.dumps(creds)
        self._logger.debug('Connecting to Keystone sending this JSON: %s',
                           creds_json)
        # NOTE(vish): We could save a call to keystone by having
        #             keystone return token, tenant, user, and roles
        #             from this call.
        #
        # NOTE(chmou): We still have the same problem we would need to
        #              change token_auth to detect if we already
        #              identified and not doing a second query and just
        #              pass it through to swiftauth in this case.
        try:
            # NB: requests.Response, not swob.Response
            resp = self._json_request(creds_json)
        except HTTPException as e_resp:
            if self._delay_auth_decision:
                msg = 'Received error, deferring rejection based on error: %s'
                self._logger.debug(msg, e_resp.status)
                return self._app(environ, start_response)
            else:
                msg = 'Received error, rejecting request with error: %s'
                self._logger.debug(msg, e_resp.status)
                # NB: swob.Response, not requests.Response
                return e_resp(environ, start_response)

        self._logger.debug('Keystone Reply: Status: %d, Output: %s',
                           resp.status_code, resp.content)

        try:
            token = resp.json()
            if 'access' in token:
                headers, token_id, tenant = parse_v2_response(token)
            elif 'token' in token:
                headers, token_id, tenant = parse_v3_response(token)
            else:
                raise ValueError

            # Populate the environment similar to auth_token,
            # so we don't have to contact Keystone again.
            #
            # Note that although the strings are unicode following json
            # deserialization, Swift's HeaderEnvironProxy handles ensuring
            # they're stored as native strings
            req.headers.update(headers)
            req.environ['keystone.token_info'] = token
        except (ValueError, KeyError, TypeError):
            if self._delay_auth_decision:
                error = ('Error on keystone reply: %d %s - '
                         'deferring rejection downstream')
                self._logger.debug(error, resp.status_code, resp.content)
                return self._app(environ, start_response)
            else:
                error = ('Error on keystone reply: %d %s - '
                         'rejecting request')
                self._logger.debug(error, resp.status_code, resp.content)
                return self._deny_request('InvalidURI')(
                    environ, start_response)

        req.headers['X-Auth-Token'] = token_id
        tenant_to_connect = force_tenant or tenant['id']
        if six.PY2 and isinstance(tenant_to_connect, six.text_type):
            tenant_to_connect = tenant_to_connect.encode('utf-8')
        self._logger.debug('Connecting with tenant: %s', tenant_to_connect)
        new_tenant_name = '%s%s' % (self._reseller_prefix, tenant_to_connect)
        environ['PATH_INFO'] = environ['PATH_INFO'].replace(account,
                                                            new_tenant_name)
        return self._app(environ, start_response)
|
||||
|
||||
|
||||
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    # Local settings override the globals.
    conf = dict(global_conf, **local_conf)

    def auth_filter(app):
        return S3Token(app, conf)
    return auth_filter
|
@ -0,0 +1,16 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="AccessControlPolicy">
|
||||
<interleave>
|
||||
<element name="Owner">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
<element name="AccessControlList">
|
||||
<ref name="AccessControlList"/>
|
||||
</element>
|
||||
</interleave>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
@ -0,0 +1,25 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="BucketLoggingStatus">
|
||||
<optional>
|
||||
<element name="LoggingEnabled">
|
||||
<interleave>
|
||||
<element name="TargetBucket">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="TargetPrefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="TargetGrants">
|
||||
<ref name="AccessControlList"/>
|
||||
</element>
|
||||
</optional>
|
||||
</interleave>
|
||||
</element>
|
||||
</optional>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
66
swift/common/middleware/s3api/schema/common.rng
Normal file
66
swift/common/middleware/s3api/schema/common.rng
Normal file
@ -0,0 +1,66 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<define name="CanonicalUser">
|
||||
<interleave>
|
||||
<element name="ID">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="DisplayName">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
</interleave>
|
||||
</define>
|
||||
<define name="StorageClass">
|
||||
<choice>
|
||||
<value>STANDARD</value>
|
||||
<value>REDUCED_REDUNDANCY</value>
|
||||
<value>GLACIER</value>
|
||||
<value>UNKNOWN</value>
|
||||
</choice>
|
||||
</define>
|
||||
<define name="AccessControlList">
|
||||
<zeroOrMore>
|
||||
<element name="Grant">
|
||||
<interleave>
|
||||
<element name="Grantee">
|
||||
<choice>
|
||||
<group>
|
||||
<attribute name="xsi:type">
|
||||
<value>AmazonCustomerByEmail</value>
|
||||
</attribute>
|
||||
<element name="EmailAddress">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</group>
|
||||
<group>
|
||||
<attribute name="xsi:type">
|
||||
<value>CanonicalUser</value>
|
||||
</attribute>
|
||||
<ref name="CanonicalUser"/>
|
||||
</group>
|
||||
<group>
|
||||
<attribute name="xsi:type">
|
||||
<value>Group</value>
|
||||
</attribute>
|
||||
<element name="URI">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</group>
|
||||
</choice>
|
||||
</element>
|
||||
<element name="Permission">
|
||||
<choice>
|
||||
<value>READ</value>
|
||||
<value>WRITE</value>
|
||||
<value>READ_ACP</value>
|
||||
<value>WRITE_ACP</value>
|
||||
<value>FULL_CONTROL</value>
|
||||
</choice>
|
||||
</element>
|
||||
</interleave>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</define>
|
||||
</grammar>
|
@ -0,0 +1,19 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="CompleteMultipartUpload">
|
||||
<oneOrMore>
|
||||
<element name="Part">
|
||||
<interleave>
|
||||
<element name="PartNumber">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<element name="ETag">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</interleave>
|
||||
</element>
|
||||
</oneOrMore>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
@ -0,0 +1,19 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="CompleteMultipartUploadResult">
|
||||
<element name="Location">
|
||||
<data type="anyURI"/>
|
||||
</element>
|
||||
<element name="Bucket">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="ETag">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
13
swift/common/middleware/s3api/schema/copy_object_result.rng
Normal file
13
swift/common/middleware/s3api/schema/copy_object_result.rng
Normal file
@ -0,0 +1,13 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="CopyObjectResult">
|
||||
<element name="LastModified">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
<element name="ETag">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
13
swift/common/middleware/s3api/schema/copy_part_result.rng
Normal file
13
swift/common/middleware/s3api/schema/copy_part_result.rng
Normal file
@ -0,0 +1,13 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="CopyPartResult">
|
||||
<element name="LastModified">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
<element name="ETag">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
@ -0,0 +1,11 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element>
|
||||
<anyName/>
|
||||
<element name="LocationConstraint">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
28
swift/common/middleware/s3api/schema/delete.rng
Normal file
28
swift/common/middleware/s3api/schema/delete.rng
Normal file
@ -0,0 +1,28 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="Delete">
|
||||
<interleave>
|
||||
<optional>
|
||||
<element name="Quiet">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
</optional>
|
||||
<oneOrMore>
|
||||
<element name="Object">
|
||||
<interleave>
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="VersionId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
</interleave>
|
||||
</element>
|
||||
</oneOrMore>
|
||||
</interleave>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
47
swift/common/middleware/s3api/schema/delete_result.rng
Normal file
47
swift/common/middleware/s3api/schema/delete_result.rng
Normal file
@ -0,0 +1,47 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="DeleteResult">
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<element name="Deleted">
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="VersionId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="DeleteMarker">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="DeleteMarkerVersionId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
</element>
|
||||
<element name="Error">
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="VersionId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="Code">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Message">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
30
swift/common/middleware/s3api/schema/error.rng
Normal file
30
swift/common/middleware/s3api/schema/error.rng
Normal file
@ -0,0 +1,30 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="Error">
|
||||
<element name="Code">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Message">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<zeroOrMore>
|
||||
<ref name="DebugInfo"/>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</start>
|
||||
<define name="DebugInfo">
|
||||
<element>
|
||||
<anyName/>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<attribute>
|
||||
<anyName/>
|
||||
</attribute>
|
||||
<text/>
|
||||
<ref name="DebugInfo"/>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</define>
|
||||
</grammar>
|
@ -0,0 +1,16 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="InitiateMultipartUploadResult">
|
||||
<element name="Bucket">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="UploadId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
@ -0,0 +1,56 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="LifecycleConfiguration">
|
||||
<oneOrMore>
|
||||
<element name="Rule">
|
||||
<interleave>
|
||||
<optional>
|
||||
<element name="ID">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="Prefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Status">
|
||||
<choice>
|
||||
<value>Enabled</value>
|
||||
<value>Disabled</value>
|
||||
</choice>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="Transition">
|
||||
<ref name="Transition"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="Expiration">
|
||||
<ref name="Expiration"/>
|
||||
</element>
|
||||
</optional>
|
||||
</interleave>
|
||||
</element>
|
||||
</oneOrMore>
|
||||
</element>
|
||||
</start>
|
||||
<define name="Expiration">
|
||||
<choice>
|
||||
<element name="Days">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<element name="Date">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
</choice>
|
||||
</define>
|
||||
<define name="Transition">
|
||||
<interleave>
|
||||
<ref name="Expiration"/>
|
||||
<element name="StorageClass">
|
||||
<ref name="StorageClass"/>
|
||||
</element>
|
||||
</interleave>
|
||||
</define>
|
||||
</grammar>
|
@ -0,0 +1,23 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="ListAllMyBucketsResult">
|
||||
<element name="Owner">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
<element name="Buckets">
|
||||
<zeroOrMore>
|
||||
<element name="Bucket">
|
||||
<element name="Name">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="CreationDate">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
93
swift/common/middleware/s3api/schema/list_bucket_result.rng
Normal file
93
swift/common/middleware/s3api/schema/list_bucket_result.rng
Normal file
@ -0,0 +1,93 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="ListBucketResult">
|
||||
<element name="Name">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Prefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<choice>
|
||||
<group>
|
||||
<element name="Marker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="NextMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
</group>
|
||||
<group>
|
||||
<optional>
|
||||
<element name="NextContinuationToken">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="ContinuationToken">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="StartAfter">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="KeyCount">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
</group>
|
||||
</choice>
|
||||
<element name="MaxKeys">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="EncodingType">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="Delimiter">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="IsTruncated">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
<zeroOrMore>
|
||||
<element name="Contents">
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="LastModified">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
<element name="ETag">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Size">
|
||||
<data type="long"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="Owner">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="StorageClass">
|
||||
<ref name="StorageClass"/>
|
||||
</element>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<element name="CommonPrefixes">
|
||||
<element name="Prefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
@ -0,0 +1,73 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="ListMultipartUploadsResult">
|
||||
<element name="Bucket">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="KeyMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="UploadIdMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="NextKeyMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="NextUploadIdMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="Delimiter">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="Prefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="MaxUploads">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="EncodingType">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="IsTruncated">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
<zeroOrMore>
|
||||
<element name="Upload">
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="UploadId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Initiator">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
<element name="Owner">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
<element name="StorageClass">
|
||||
<ref name="StorageClass"/>
|
||||
</element>
|
||||
<element name="Initiated">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<element name="CommonPrefixes">
|
||||
<element name="Prefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
59
swift/common/middleware/s3api/schema/list_parts_result.rng
Normal file
59
swift/common/middleware/s3api/schema/list_parts_result.rng
Normal file
@ -0,0 +1,59 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="ListPartsResult">
|
||||
<element name="Bucket">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="UploadId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Initiator">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
<element name="Owner">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
<element name="StorageClass">
|
||||
<ref name="StorageClass"/>
|
||||
</element>
|
||||
<element name="PartNumberMarker">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<element name="NextPartNumberMarker">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<element name="MaxParts">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="EncodingType">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="IsTruncated">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
<zeroOrMore>
|
||||
<element name="Part">
|
||||
<element name="PartNumber">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<element name="LastModified">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
<element name="ETag">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Size">
|
||||
<data type="long"/>
|
||||
</element>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
104
swift/common/middleware/s3api/schema/list_versions_result.rng
Normal file
104
swift/common/middleware/s3api/schema/list_versions_result.rng
Normal file
@ -0,0 +1,104 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<include href="common.rng"/>
|
||||
<start>
|
||||
<element name="ListVersionsResult">
|
||||
<element name="Name">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Prefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="KeyMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="VersionIdMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="NextKeyMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="NextVersionIdMarker">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="MaxKeys">
|
||||
<data type="int"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="EncodingType">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="Delimiter">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="IsTruncated">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
<zeroOrMore>
|
||||
<choice>
|
||||
<element name="Version">
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="VersionId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="IsLatest">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
<element name="LastModified">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
<element name="ETag">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="Size">
|
||||
<data type="long"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="Owner">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
</optional>
|
||||
<element name="StorageClass">
|
||||
<ref name="StorageClass"/>
|
||||
</element>
|
||||
</element>
|
||||
<element name="DeleteMarker">
|
||||
<element name="Key">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="VersionId">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
<element name="IsLatest">
|
||||
<data type="boolean"/>
|
||||
</element>
|
||||
<element name="LastModified">
|
||||
<data type="dateTime"/>
|
||||
</element>
|
||||
<optional>
|
||||
<element name="Owner">
|
||||
<ref name="CanonicalUser"/>
|
||||
</element>
|
||||
</optional>
|
||||
</element>
|
||||
</choice>
|
||||
</zeroOrMore>
|
||||
<zeroOrMore>
|
||||
<element name="CommonPrefixes">
|
||||
<element name="Prefix">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</element>
|
||||
</zeroOrMore>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
@ -0,0 +1,8 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||
<start>
|
||||
<element name="LocationConstraint">
|
||||
<data type="string"/>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
@ -0,0 +1,25 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<grammar xmlns="http://relaxng.org/ns/structure/1.0">
|
||||
<start>
|
||||
<element name="VersioningConfiguration">
|
||||
<interleave>
|
||||
<optional>
|
||||
<element name="Status">
|
||||
<choice>
|
||||
<value>Enabled</value>
|
||||
<value>Suspended</value>
|
||||
</choice>
|
||||
</element>
|
||||
</optional>
|
||||
<optional>
|
||||
<element name="MfaDelete">
|
||||
<choice>
|
||||
<value>Enabled</value>
|
||||
<value>Disabled</value>
|
||||
</choice>
|
||||
</element>
|
||||
</optional>
|
||||
</interleave>
|
||||
</element>
|
||||
</start>
|
||||
</grammar>
|
563
swift/common/middleware/s3api/subresource.py
Normal file
563
swift/common/middleware/s3api/subresource.py
Normal file
@ -0,0 +1,563 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
---------------------------
|
||||
s3api's ACLs implementation
|
||||
---------------------------
|
||||
s3api uses a different implementation approach to achieve S3 ACLs.
|
||||
|
||||
First, we should understand what we have to design to achieve real S3 ACLs.
|
||||
Current s3api(real S3)'s ACLs Model is as follows::
|
||||
|
||||
AccessControlPolicy:
|
||||
Owner:
|
||||
AccessControlList:
|
||||
Grant[n]:
|
||||
(Grantee, Permission)
|
||||
|
||||
Each bucket or object has its own acl consisting of Owner and
|
||||
AccessControlList. AccessControlList can contain some Grants.
|
||||
By default, AccessControlList has only one Grant to allow FULL
|
||||
CONTROL to owner. Each Grant includes a single pair with Grantee,
|
||||
Permission. Grantee is the user (or user group) allowed the given permission.
|
||||
|
||||
This module defines the groups and the relation tree.
|
||||
|
||||
If you want to get more information about S3's ACLs model in detail,
|
||||
please see official documentation here,
|
||||
|
||||
http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
|
||||
|
||||
"""
|
||||
from functools import partial
|
||||
|
||||
from swift.common.utils import json
|
||||
|
||||
from swift.common.middleware.s3api.s3response import InvalidArgument, \
|
||||
MalformedACLError, S3NotImplemented, InvalidRequest, AccessDenied
|
||||
from swift.common.middleware.s3api.etree import Element, SubElement, tostring
|
||||
from swift.common.middleware.s3api.utils import sysmeta_header
|
||||
from swift.common.middleware.s3api.exception import InvalidSubresource
|
||||
|
||||
XMLNS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
|
||||
PERMISSIONS = ['FULL_CONTROL', 'READ', 'WRITE', 'READ_ACP', 'WRITE_ACP']
|
||||
LOG_DELIVERY_USER = '.log_delivery'
|
||||
|
||||
|
||||
def encode_acl(resource, acl):
    """
    Encode an ACL instance to Swift metadata.

    Given a resource type and an ACL instance, this method returns HTTP
    headers, which can be used for Swift metadata.

    :param resource: resource type string ('object' or container-like)
        used to select the sysmeta header prefix
    :param acl: an ACL instance to serialize
    :returns: a dict holding a single sysmeta header whose value is the
        ACL rendered as compact JSON
    """
    header_value = {"Owner": acl.owner.id}
    grants = []
    for grant in acl.grants:
        # a grantee is stored by its string form: a user id for User
        # grantees, or the subclass name for Group grantees
        grant = {"Permission": grant.permission,
                 "Grantee": str(grant.grantee)}
        grants.append(grant)
    header_value.update({"Grant": grants})
    headers = {}
    key = sysmeta_header(resource, 'acl')
    # compact separators keep the JSON small, since it lives in a header
    headers[key] = json.dumps(header_value, separators=(',', ':'))

    return headers
||||
|
||||
|
||||
def decode_acl(resource, headers, allow_no_owner):
    """
    Decode Swift metadata to an ACL instance.

    Given a resource type and HTTP headers, this method returns an ACL
    instance.

    :param resource: resource type string ('object' or container-like)
        used to select the sysmeta header prefix
    :param headers: dict of HTTP headers possibly carrying the ACL sysmeta
    :param allow_no_owner: passed through to the ACL constructor; when
        true an ownerless ACL is treated as public
    :returns: an ACL instance
    :raises InvalidSubresource: if the stored value cannot be decoded
    """
    value = ''

    key = sysmeta_header(resource, 'acl')
    if key in headers:
        value = headers[key]

    if value == '':
        # Fix me: In the case of value is empty or not dict instance,
        # I want an instance of Owner as None.
        # However, in the above process would occur error in reference
        # to an instance variable of Owner.
        return ACL(Owner(None, None), [], True, allow_no_owner)

    try:
        encode_value = json.loads(value)
        if not isinstance(encode_value, dict):
            # malformed sysmeta; fall back to an ownerless ACL
            return ACL(Owner(None, None), [], True, allow_no_owner)

        id = None
        name = None
        grants = []
        if 'Owner' in encode_value:
            # only the id is stored; reuse it as the display name
            id = encode_value['Owner']
            name = encode_value['Owner']
        if 'Grant' in encode_value:
            for grant in encode_value['Grant']:
                grantee = None
                # pylint: disable-msg=E1101
                for group in Group.__subclasses__():
                    # Group grantees were stored by their class name
                    if group.__name__ == grant['Grantee']:
                        grantee = group()
                if not grantee:
                    # not a predefined group; treat the value as a user id
                    grantee = User(grant['Grantee'])
                permission = grant['Permission']
                grants.append(Grant(grantee, permission))
        return ACL(Owner(id, name), grants, True, allow_no_owner)
    except Exception as e:
        raise InvalidSubresource((resource, 'acl', value), e)
||||
|
||||
|
||||
class Grantee(object):
    """
    Base class for grantee.

    Methods:

    * init: create a Grantee instance
    * elem: create an ElementTree from itself

    Static Methods:

    * from_header: convert a grantee string in the HTTP header
                   to an Grantee instance.
    * from_elem: convert a ElementTree to an Grantee instance.

    """
    # Needs confirmation whether we really need these methods or not.
    # * encode (method): create a JSON which includes whole own elements
    # * encode_from_elem (static method): convert from an ElementTree to a JSON
    # * elem_from_json (static method): convert from a JSON to an ElementTree
    # * from_json (static method): convert a Json string to an Grantee
    #   instance.

    def __contains__(self, key):
        """
        The key argument is a S3 user id. This method checks that the user id
        belongs to this class.
        """
        raise S3NotImplemented()

    def elem(self):
        """
        Get an etree element of this instance.
        """
        raise S3NotImplemented()

    @staticmethod
    def from_elem(elem):
        # the xsi:type attribute selects which grantee representation
        # the XML element carries
        type = elem.get('{%s}type' % XMLNS_XSI)
        if type == 'CanonicalUser':
            value = elem.find('./ID').text
            return User(value)
        elif type == 'Group':
            value = elem.find('./URI').text
            subclass = get_group_subclass_from_uri(value)
            return subclass()
        elif type == 'AmazonCustomerByEmail':
            raise S3NotImplemented()
        else:
            raise MalformedACLError()

    @staticmethod
    def from_header(grantee):
        """
        Convert a grantee string in the HTTP header to an Grantee instance.
        """
        # header grants look like: id="canonical-id" or uri="group-uri"
        type, value = grantee.split('=', 1)
        value = value.strip('"\'')
        if type == 'id':
            return User(value)
        elif type == 'emailAddress':
            raise S3NotImplemented()
        elif type == 'uri':
            # return a subclass instance of Group class
            subclass = get_group_subclass_from_uri(value)
            return subclass()
        else:
            raise InvalidArgument(type, value,
                                  'Argument format not recognized')
||||
|
||||
|
||||
class User(Grantee):
    """
    Canonical user class for S3 accounts.
    """
    type = 'CanonicalUser'

    def __init__(self, name):
        # the account/user name doubles as both the canonical id and the
        # display name
        self.id = name
        self.display_name = name

    def __contains__(self, key):
        # a user grantee contains exactly its own id
        return key == self.id

    def elem(self):
        """
        Get an etree <Grantee> element carrying this user's id and name.
        """
        elem = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
        elem.set('{%s}type' % XMLNS_XSI, self.type)
        SubElement(elem, 'ID').text = self.id
        SubElement(elem, 'DisplayName').text = self.display_name
        return elem

    def __str__(self):
        return self.display_name
||||
|
||||
|
||||
class Owner(object):
    """
    Owner class for S3 accounts.

    A plain value object holding the canonical user id and the display
    name of a bucket or object owner.
    """

    def __init__(self, id, name):
        # 'id' shadows the builtin, but the parameter name is part of the
        # established call convention and is kept for compatibility
        self.id, self.name = id, name
||||
|
||||
|
||||
def get_group_subclass_from_uri(uri):
    """
    Convert a URI to one of the predefined groups.

    :param uri: the group URI from an ACL grant
    :returns: the matching Group subclass (the class itself, not an
        instance)
    :raises InvalidArgument: if no predefined group uses this URI
    """
    for group in Group.__subclasses__():  # pylint: disable-msg=E1101
        if group.uri == uri:
            return group
    raise InvalidArgument('uri', uri, 'Invalid group uri')
||||
|
||||
|
||||
class Group(Grantee):
    """
    Base class for Amazon S3 Predefined Groups
    """
    type = 'Group'
    # subclasses override this with their group URI
    uri = ''

    def __init__(self):
        # Initialize method to clarify this has nothing to do
        pass

    def elem(self):
        """
        Get an etree <Grantee> element carrying this group's URI.
        """
        elem = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
        elem.set('{%s}type' % XMLNS_XSI, self.type)
        SubElement(elem, 'URI').text = self.uri

        return elem

    def __str__(self):
        # the class name is the stored/serialized form of a group grantee
        return self.__class__.__name__
||||
|
||||
|
||||
def canned_acl_grantees(bucket_owner, object_owner=None):
    """
    A set of predefined grants supported by AWS S3.

    :param bucket_owner: Owner instance for the bucket
    :param object_owner: optional Owner instance for the object; when
        given it is treated as the resource owner, otherwise the bucket
        owner is
    :returns: dict mapping each canned-acl name to a list of
        (permission, grantee) pairs
    """
    owner = object_owner or bucket_owner

    return {
        'private': [
            ('FULL_CONTROL', User(owner.name)),
        ],
        'public-read': [
            ('READ', AllUsers()),
            ('FULL_CONTROL', User(owner.name)),
        ],
        'public-read-write': [
            ('READ', AllUsers()),
            ('WRITE', AllUsers()),
            ('FULL_CONTROL', User(owner.name)),
        ],
        'authenticated-read': [
            ('READ', AuthenticatedUsers()),
            ('FULL_CONTROL', User(owner.name)),
        ],
        'bucket-owner-read': [
            ('READ', User(bucket_owner.name)),
            ('FULL_CONTROL', User(owner.name)),
        ],
        'bucket-owner-full-control': [
            ('FULL_CONTROL', User(owner.name)),
            ('FULL_CONTROL', User(bucket_owner.name)),
        ],
        'log-delivery-write': [
            ('WRITE', LogDelivery()),
            ('READ_ACP', LogDelivery()),
            ('FULL_CONTROL', User(owner.name)),
        ],
    }
||||
|
||||
|
||||
class AuthenticatedUsers(Group):
    """
    This group represents all AWS accounts. Access permission to this group
    allows any AWS account to access the resource. However, all requests must
    be signed (authenticated).
    """
    uri = 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'

    def __contains__(self, key):
        # s3api handles only signed requests.
        return True
||||
|
||||
|
||||
class AllUsers(Group):
    """
    Access permission to this group allows anyone to access the resource. The
    requests can be signed (authenticated) or unsigned (anonymous). Unsigned
    requests omit the Authentication header in the request.

    Note: s3api regards unsigned requests as Swift API accesses, and bypasses
    them to Swift. As a result, AllUsers behaves completely same as
    AuthenticatedUsers.
    """
    uri = 'http://acs.amazonaws.com/groups/global/AllUsers'

    def __contains__(self, key):
        # every signed requester is a member (see the class note above)
        return True
||||
|
||||
|
||||
class LogDelivery(Group):
    """
    WRITE and READ_ACP permissions on a bucket enables this group to write
    server access logs to the bucket.
    """
    uri = 'http://acs.amazonaws.com/groups/s3/LogDelivery'

    def __contains__(self, key):
        # keys may look like 'tenant:user'; only the user part is compared
        # against the reserved log-delivery user name
        if ':' in key:
            tenant, user = key.split(':', 1)
        else:
            user = key
        return user == LOG_DELIVERY_USER
||||
|
||||
|
||||
class Grant(object):
    """
    Grant Class which includes both Grantee and Permission
    """

    def __init__(self, grantee, permission):
        """
        :param grantee: a grantee class or its subclass
        :param permission: string
        :raises S3NotImplemented: if the permission is not one of the
            supported PERMISSIONS
        :raises ValueError: if grantee is not a Grantee instance
        """
        if permission.upper() not in PERMISSIONS:
            raise S3NotImplemented()
        if not isinstance(grantee, Grantee):
            raise ValueError()
        self.grantee = grantee
        self.permission = permission

    @classmethod
    def from_elem(cls, elem):
        """
        Convert an ElementTree to an ACL instance
        """
        grantee = Grantee.from_elem(elem.find('./Grantee'))
        permission = elem.find('./Permission').text
        return cls(grantee, permission)

    def elem(self):
        """
        Create an etree element.
        """
        elem = Element('Grant')
        elem.append(self.grantee.elem())
        SubElement(elem, 'Permission').text = self.permission

        return elem

    def allow(self, grantee, permission):
        # True when this grant gives exactly `permission` and its grantee
        # contains the given user key (see Grantee.__contains__)
        return permission == self.permission and grantee in self.grantee
||||
|
||||
|
||||
class ACL(object):
    """
    S3 ACL class.

    Refs (S3 API - acl-overview:
          http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html):

    The sample ACL includes an Owner element identifying the owner via the
    AWS account's canonical user ID. The Grant element identifies the grantee
    (either an AWS account or a predefined group), and the permission granted.
    This default ACL has one Grant element for the owner. You grant permissions
    by adding Grant elements, each grant identifying the grantee and the
    permission.
    """
    metadata_name = 'acl'
    root_tag = 'AccessControlPolicy'
    # upper bound on the serialized XML size
    max_xml_length = 200 * 1024

    def __init__(self, owner, grants=None, s3_acl=False, allow_no_owner=False):
        """
        :param owner: Owner instance for ACL instance
        :param grants: a list of Grant instances
        :param s3_acl: boolean indicates whether this class is used under
            s3_acl is True or False (from s3api middleware configuration)
        :param allow_no_owner: boolean indicates this ACL instance can be
            handled when no owner information found
        """
        self.owner = owner
        self.grants = grants or []
        self.s3_acl = s3_acl
        self.allow_no_owner = allow_no_owner

    def __repr__(self):
        return tostring(self.elem())

    @classmethod
    def from_elem(cls, elem, s3_acl=False, allow_no_owner=False):
        """
        Convert an ElementTree to an ACL instance
        """
        id = elem.find('./Owner/ID').text
        try:
            name = elem.find('./Owner/DisplayName').text
        except AttributeError:
            # DisplayName is optional; fall back to the canonical id
            name = id

        grants = [Grant.from_elem(e)
                  for e in elem.findall('./AccessControlList/Grant')]
        return cls(Owner(id, name), grants, s3_acl, allow_no_owner)

    def elem(self):
        """
        Decode the value to an ACL instance.
        """
        elem = Element(self.root_tag)

        owner = SubElement(elem, 'Owner')
        SubElement(owner, 'ID').text = self.owner.id
        SubElement(owner, 'DisplayName').text = self.owner.name

        SubElement(elem, 'AccessControlList').extend(
            g.elem() for g in self.grants
        )

        return elem

    def check_owner(self, user_id):
        """
        Check that the user is an owner.

        :param user_id: user id string to check
        :raises AccessDenied: if the user is not the owner
        """
        if not self.s3_acl:
            # Ignore S3api ACL.
            return

        if not self.owner.id:
            if self.allow_no_owner:
                # No owner means public.
                return
            raise AccessDenied()

        if user_id != self.owner.id:
            raise AccessDenied()

    def check_permission(self, user_id, permission):
        """
        Check that the user has a permission.

        :param user_id: user id string to check
        :param permission: one of the PERMISSIONS strings
        :raises AccessDenied: if no grant allows the permission
        """
        if not self.s3_acl:
            # Ignore S3api ACL.
            return

        try:
            # owners have full control permission
            self.check_owner(user_id)
            return
        except AccessDenied:
            pass

        if permission in PERMISSIONS:
            for g in self.grants:
                # FULL_CONTROL implies every other permission
                if g.allow(user_id, 'FULL_CONTROL') or \
                        g.allow(user_id, permission):
                    return

        raise AccessDenied()

    @classmethod
    def from_headers(cls, headers, bucket_owner, object_owner=None,
                     as_private=True):
        """
        Convert HTTP headers to an ACL instance.
        """
        grants = []
        try:
            # explicit per-permission grant headers, e.g. x-amz-grant-read
            for key, value in headers.items():
                if key.lower().startswith('x-amz-grant-'):
                    permission = key[len('x-amz-grant-'):]
                    permission = permission.upper().replace('-', '_')
                    if permission not in PERMISSIONS:
                        continue
                    for grantee in value.split(','):
                        grants.append(
                            Grant(Grantee.from_header(grantee), permission))

            if 'x-amz-acl' in headers:
                try:
                    acl = headers['x-amz-acl']
                    if len(grants) > 0:
                        err_msg = 'Specifying both Canned ACLs and Header ' \
                            'Grants is not allowed'
                        raise InvalidRequest(err_msg)
                    grantees = canned_acl_grantees(
                        bucket_owner, object_owner)[acl]
                    for permission, grantee in grantees:
                        grants.append(Grant(grantee, permission))
                except KeyError:
                    # expects canned_acl_grantees()[] raises KeyError
                    raise InvalidArgument('x-amz-acl', headers['x-amz-acl'])
        except (KeyError, ValueError):
            # TODO: think about we really catch this except sequence
            raise InvalidRequest()

        if len(grants) == 0:
            # No ACL headers
            if as_private:
                return ACLPrivate(bucket_owner, object_owner)
            else:
                return None

        return cls(object_owner or bucket_owner, grants)
||||
|
||||
|
||||
class CannedACL(object):
    """
    A dict-like object that returns canned ACL.
    """
    def __getitem__(self, key):
        def acl(key, bucket_owner, object_owner=None,
                s3_acl=False, allow_no_owner=False):
            # expand the canned-acl name into concrete Grant instances
            grants = []
            grantees = canned_acl_grantees(bucket_owner, object_owner)[key]
            for permission, grantee in grantees:
                grants.append(Grant(grantee, permission))
            return ACL(object_owner or bucket_owner,
                       grants, s3_acl, allow_no_owner)

        # bind the canned-acl name; the caller supplies the owners later
        return partial(acl, key)
||||
|
||||
|
||||
canned_acl = CannedACL()

# Pre-bound factories, one per canned ACL name; each takes
# (bucket_owner, object_owner=None, s3_acl=False, allow_no_owner=False)
# and returns an ACL instance.
ACLPrivate = canned_acl['private']
ACLPublicRead = canned_acl['public-read']
ACLPublicReadWrite = canned_acl['public-read-write']
ACLAuthenticatedRead = canned_acl['authenticated-read']
ACLBucketOwnerRead = canned_acl['bucket-owner-read']
ACLBucketOwnerFullControl = canned_acl['bucket-owner-full-control']
ACLLogDeliveryWrite = canned_acl['log-delivery-write']
190
swift/common/middleware/s3api/utils.py
Normal file
190
swift/common/middleware/s3api/utils.py
Normal file
@ -0,0 +1,190 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
import calendar
|
||||
import email.utils
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
|
||||
# Need for check_path_header
|
||||
from swift.common import utils
|
||||
|
||||
MULTIUPLOAD_SUFFIX = '+segments'
|
||||
|
||||
|
||||
def sysmeta_prefix(resource):
    """
    Returns the system metadata prefix for given resource type.

    Any resource other than 'object' (case-insensitive) is treated as a
    container.
    """
    kind = 'object' if resource.lower() == 'object' else 'container'
    return 'x-%s-sysmeta-s3api-' % kind
||||
|
||||
|
||||
def sysmeta_header(resource, name):
    """
    Returns the system metadata header for given resource type and name.

    :param resource: resource type string passed to sysmeta_prefix
    :param name: metadata name appended to the prefix
    """
    return sysmeta_prefix(resource) + name
||||
|
||||
|
||||
def camel_to_snake(camel):
    """
    Convert a CamelCase string to snake_case.

    An underscore is inserted before every uppercase letter that follows
    any other character, then the result is lowercased.
    """
    with_underscores = re.sub('(.)([A-Z])', r'\1_\2', camel)
    return with_underscores.lower()
||||
|
||||
|
||||
def snake_to_camel(snake):
    """
    Convert a snake_case string to CamelCase.

    str.title() capitalizes each underscore-separated word; the
    underscores are then stripped out.
    """
    titled = snake.title()
    return titled.replace('_', '')
||||
|
||||
|
||||
def unique_id():
    """
    Return a random, URL-safe base64-encoded unique identifier.

    NOTE(review): relies on Python 2 semantics where str(uuid4()) is a
    byte string; under Python 3 urlsafe_b64encode would need bytes input
    — confirm before porting.
    """
    return base64.urlsafe_b64encode(str(uuid.uuid4()))
||||
|
||||
|
||||
def utf8encode(s):
    """
    Encode a unicode string to UTF-8 bytes; byte strings pass through
    unchanged.  (Python 2 only: uses the `unicode` builtin.)
    """
    if isinstance(s, unicode):
        s = s.encode('utf8')
    return s
||||
|
||||
|
||||
def utf8decode(s):
    """
    Decode UTF-8 bytes to a unicode string; unicode input passes through
    unchanged.  (Python 2 semantics: `str` here is the byte string type.)
    """
    if isinstance(s, str):
        s = s.decode('utf8')
    return s
||||
|
||||
|
||||
def validate_bucket_name(name, dns_compliant_bucket_names):
    """
    Validates the name of the bucket against S3 criteria,
    http://docs.amazonwebservices.com/AmazonS3/latest/BucketRestrictions.html
    True is valid, False is invalid.

    :param name: the bucket name to validate
    :param dns_compliant_bucket_names: boolean; when true, enforce the
        stricter DNS-compliant rules (max 63 chars, lowercase only, no
        dashes adjacent to periods), otherwise the legacy looser rules
        (max 255 chars, uppercase and underscore also allowed)
    :returns: True if valid, False otherwise
    """
    valid_chars = '-.a-z0-9'
    if not dns_compliant_bucket_names:
        # legacy rules additionally permit uppercase letters and underscore
        valid_chars += 'A-Z_'
    max_len = 63 if dns_compliant_bucket_names else 255

    if len(name) < 3 or len(name) > max_len or not name[0].isalnum():
        # Bucket names should be between 3 and 63 (or 255) characters long
        # Bucket names must start with a letter or a number
        return False
    elif dns_compliant_bucket_names and (
            '.-' in name or '-.' in name or '..' in name or
            not name[-1].isalnum()):
        # Bucket names cannot contain dashes next to periods
        # Bucket names cannot contain two adjacent periods
        # Bucket names must end with a letter or a number
        return False
    elif name.endswith('.'):
        # Bucket names must not end with dot
        return False
    # NOTE: raw strings here; the original used "\." in a plain string,
    # which is an invalid escape sequence (deprecated since Python 3.6
    # and a syntax error from 3.12)
    elif re.match(r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.)"
                  r"{3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",
                  name):
        # Bucket names cannot be formatted as an IP Address
        return False
    elif not re.match("^[%s]*$" % valid_chars, name):
        # Bucket names can contain lowercase letters, numbers, and hyphens.
        return False
    else:
        return True
||||
|
||||
|
||||
class S3Timestamp(utils.Timestamp):
    """
    Timestamp subclass rendering the date formats used by the S3 API.
    """
    @property
    def s3xmlformat(self):
        # ISO 8601 with a literal millisecond field and 'Z' suffix,
        # e.g. '2014-06-10T22:47:32.000Z'; [:-7] strips the base
        # isoformat's '.uuuuuu' microseconds suffix
        return self.isoformat[:-7] + '.000Z'

    @property
    def amz_date_format(self):
        """
        this format should be like 'YYYYMMDDThhmmssZ'
        """
        return self.isoformat.replace(
            '-', '').replace(':', '')[:-7] + 'Z'

    @classmethod
    def now(cls):
        # current wall-clock time as an S3Timestamp
        return cls(time.time())
||||
|
||||
|
||||
def mktime(timestamp_str, time_format='%Y-%m-%dT%H:%M:%S'):
    """
    mktime creates a float instance in epoch time really like as time.mktime

    the difference from time.mktime is allowing to 2 formats string for the
    argument for the S3 testing usage.
    TODO: support

    :param timestamp_str: a string of timestamp formatted as
                          (a) RFC2822 (e.g. date header)
                          (b) %Y-%m-%dT%H:%M:%S (e.g. copy result)
    :param time_format: a string of format to parse in (b) process
    :return : a float instance in epoch time
    """
    # try RFC2822 first; parsedate_tz yields a 10-item tuple whose last
    # item is the timezone offset in seconds (the *remote* local time)
    parsed = email.utils.parsedate_tz(timestamp_str)
    if parsed is None:
        # fall back to the explicit format; append a zero utc offset so
        # both branches produce a 10-item tuple
        parsed = tuple(time.strptime(timestamp_str, time_format)) + (0,)

    # We prefer calendar.gmtime and a manual adjustment over
    # email.utils.mktime_tz because older versions of Python (<2.7.4) may
    # double-adjust for timezone in some situations (such when swift changes
    # os.environ['TZ'] without calling time.tzset()).
    return calendar.timegm(parsed) - parsed[9]
||||
|
||||
|
||||
class Config(dict):
    """
    A dict subclass that exposes its keys as attributes and coerces
    values on assignment: once a key holds a bool or an int, later
    assignments to that key are converted to the same type.
    """
    def __init__(self, base=None):
        if base is not None:
            self.update(base)

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No attribute '%s'" % name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    def update(self, other):
        # accept either a mapping or an iterable of (key, value) pairs;
        # everything funnels through __setitem__ so coercion applies
        if hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            for k, v in other:
                self[k] = v

    def __setitem__(self, key, value):
        current = self.get(key)
        # bool is tested before int because bool is a subclass of int
        if isinstance(current, bool):
            dict.__setitem__(self, key, utils.config_true_value(value))
        elif isinstance(current, int):
            try:
                dict.__setitem__(self, key, int(value))
            except ValueError:
                if value:  # No need to raise the error if value is ''
                    raise
        else:
            dict.__setitem__(self, key, value)
@ -273,7 +273,7 @@ class TempAuth(object):
|
||||
return self.app(env, start_response)
|
||||
if env.get('PATH_INFO', '').startswith(self.auth_prefix):
|
||||
return self.handle(env, start_response)
|
||||
s3 = env.get('swift3.auth_details')
|
||||
s3 = env.get('s3api.auth_details')
|
||||
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
|
||||
service_token = env.get('HTTP_X_SERVICE_TOKEN')
|
||||
if s3 or (token and token.startswith(self.reseller_prefix)):
|
||||
@ -435,7 +435,7 @@ class TempAuth(object):
|
||||
else:
|
||||
groups = groups.encode('utf8')
|
||||
|
||||
s3_auth_details = env.get('swift3.auth_details')
|
||||
s3_auth_details = env.get('s3api.auth_details')
|
||||
if s3_auth_details:
|
||||
if 'check_signature' not in s3_auth_details:
|
||||
self.logger.warning(
|
||||
|
@ -12,6 +12,12 @@ os-testr>=0.8.0 # Apache-2.0
|
||||
mock>=2.0 # BSD
|
||||
python-swiftclient
|
||||
python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0
|
||||
reno>=1.8.0 # Apache-2.0
|
||||
python-openstackclient
|
||||
boto
|
||||
requests-mock>=1.2.0 # Apache-2.0
|
||||
fixtures>=3.0.0 # Apache-2.0/BSD
|
||||
keystonemiddleware>=4.17.0 # Apache-2.0
|
||||
|
||||
# Security checks
|
||||
bandit>=1.1.0 # Apache-2.0
|
||||
|
@ -412,6 +412,46 @@ def _load_domain_remap_staticweb(proxy_conf_file, swift_conf_file, **kwargs):
|
||||
return test_conf_file, swift_conf_file
|
||||
|
||||
|
||||
def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load s3api configuration and override proxy-server.conf contents.

    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for s3api')

    # The global conf dict cannot be used to modify the pipeline.
    # The pipeline loader requires the pipeline to be set in the local_conf.
    # If pipeline is set in the global conf dict (which in turn populates the
    # DEFAULTS options) then it prevents pipeline being loaded into the local
    # conf during wsgi load_app.
    # Therefore we must modify the [pipeline:main] section.

    conf = ConfigParser()
    conf.read(proxy_conf_file)
    try:
        section = 'pipeline:main'
        pipeline = conf.get(section, 'pipeline')
        # insert s3api immediately before tempauth in the pipeline
        pipeline = pipeline.replace(
            "tempauth",
            "s3api tempauth")
        conf.set(section, 'pipeline', pipeline)
        # enable S3-style ACL handling for the functional tests
        conf.set('filter:s3api', 's3_acl', 'true')
    except NoSectionError as err:
        msg = 'Error problem with proxy conf file %s: %s' % \
              (proxy_conf_file, err)
        raise InProcessException(msg)

    # write the modified conf into the test directory and point the test
    # run at it, leaving the source conf untouched
    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)

    return test_conf_file, swift_conf_file
||||
|
||||
|
||||
# Mapping from possible values of the variable
|
||||
# SWIFT_TEST_IN_PROCESS_CONF_LOADER
|
||||
# to the method to call for loading the associated configuration
|
||||
@ -421,6 +461,7 @@ conf_loaders = {
|
||||
'encryption': _load_encryption,
|
||||
'ec': _load_ec_as_default_policy,
|
||||
'domain_remap_staticweb': _load_domain_remap_staticweb,
|
||||
's3api': _load_s3api,
|
||||
}
|
||||
|
||||
|
||||
@ -520,6 +561,12 @@ def in_process_setup(the_object_server=object_server):
|
||||
'account_autocreate': 'true',
|
||||
'allow_versions': 'True',
|
||||
'allow_versioned_writes': 'True',
|
||||
# TODO: move this into s3api config loader because they are
|
||||
# required by only s3api
|
||||
'allowed_headers':
|
||||
"Content-Disposition, Content-Encoding, X-Delete-At, "
|
||||
"X-Object-Manifest, X-Static-Large-Object, Cache-Control, "
|
||||
"Content-Language, Expires, X-Robots-Tag",
|
||||
# Below are values used by the functional test framework, as well as
|
||||
# by the various in-process swift servers
|
||||
'auth_host': '127.0.0.1',
|
||||
@ -531,6 +578,8 @@ def in_process_setup(the_object_server=object_server):
|
||||
'account': 'test',
|
||||
'username': 'tester',
|
||||
'password': 'testing',
|
||||
's3_access_key': 'test:tester',
|
||||
's3_secret_key': 'testing',
|
||||
# User on a second account (needs admin access to the account)
|
||||
'account2': 'test2',
|
||||
'username2': 'tester2',
|
||||
@ -538,6 +587,8 @@ def in_process_setup(the_object_server=object_server):
|
||||
# User on same account as first, but without admin access
|
||||
'username3': 'tester3',
|
||||
'password3': 'testing3',
|
||||
's3_access_key2': 'test:tester3',
|
||||
's3_secret_key2': 'testing3',
|
||||
# Service user and prefix (emulates glance, cinder, etc. user)
|
||||
'account5': 'test5',
|
||||
'username5': 'tester5',
|
||||
|
61
test/functional/s3api/__init__.py
Normal file
61
test/functional/s3api/__init__.py
Normal file
@ -0,0 +1,61 @@
|
||||
# Copyright (c) 2011-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest2
|
||||
import traceback
|
||||
import test.functional as tf
|
||||
from test.functional.s3api.s3_test_client import Connection
|
||||
|
||||
|
||||
def setUpModule():
    """Initialize the shared functional-test environment for this module."""
    tf.setup_package()
|
||||
|
||||
|
||||
def tearDownModule():
    """Tear down the shared functional-test environment for this module."""
    tf.teardown_package()
|
||||
|
||||
|
||||
class S3ApiBase(unittest2.TestCase):
    """
    Common base class for s3api functional tests.

    Skips the test when the cluster does not expose the s3api middleware,
    and gives every test a fresh, emptied S3 ``Connection``.
    """

    def __init__(self, method_name):
        super(S3ApiBase, self).__init__(method_name)
        # Remembered only so failure messages can name the running test.
        self.method_name = method_name

    def setUp(self):
        if 's3api' not in tf.cluster_info:
            raise tf.SkipTest('s3api middleware is not enabled')
        try:
            self.conn = Connection()
            # Start from a clean account: no buckets, no objects.
            self.conn.reset()
        except Exception:
            message = '%s got an error during initialize process.\n\n%s' % \
                (self.method_name, traceback.format_exc())
            # TODO: Find a way to make this go to FAIL instead of Error
            self.fail(message)

    def assertCommonResponseHeaders(self, headers, etag=None):
        """
        asserting common response headers with args

        :param headers: a dict of response headers
        :param etag: a string of md5(content).hexdigest() if not given,
                     this won't assert anything about etag. (e.g. DELETE obj)
        """
        # assertIsNotNone / assertIn give precise failure output, unlike the
        # previous assertTrue(... is not None) / assertTrue('x' in d) forms.
        self.assertIsNotNone(headers['x-amz-id-2'])
        self.assertIsNotNone(headers['x-amz-request-id'])
        self.assertIsNotNone(headers['date'])
        # TODO; requires consideration
        # self.assertTrue(headers['server'] is not None)
        if etag is not None:
            self.assertIn('etag', headers)  # sanity
            self.assertEqual(etag, headers['etag'].strip('"'))
|
139
test/functional/s3api/s3_test_client.py
Normal file
139
test/functional/s3api/s3_test_client.py
Normal file
@ -0,0 +1,139 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import test.functional as tf
|
||||
from boto.s3.connection import S3Connection, OrdinaryCallingFormat, \
|
||||
BotoClientError, S3ResponseError
|
||||
|
||||
RETRY_COUNT = 3
|
||||
|
||||
|
||||
def setUpModule():
    """Initialize the shared functional-test environment for this module."""
    tf.setup_package()
|
||||
|
||||
|
||||
def tearDownModule():
    """Tear down the shared functional-test environment for this module."""
    tf.teardown_package()
|
||||
|
||||
|
||||
class Connection(object):
    """
    Connection class used for S3 functional testing.

    Wraps a boto ``S3Connection`` pointed at the cluster's auth endpoint and
    adds helpers to reset the account and to issue raw S3 requests.
    """
    def __init__(self, aws_access_key='test:tester',
                 aws_secret_key='testing',
                 user_id='test:tester'):
        """
        Initialize method.

        :param aws_access_key: a string of aws access key
        :param aws_secret_key: a string of aws secret key
        :param user_id: a string consists of TENANT and USER name used for
                        asserting Owner ID (not required S3Connection)

        In default, Connection class will be initialized as tester user
        behaves as:
        user_test_tester = testing .admin
        """
        self.aws_access_key = aws_access_key
        self.aws_secret_key = aws_secret_key
        self.user_id = user_id
        # NOTE: auth_host and auth_port can be different from storage location
        self.host = tf.config['auth_host']
        self.port = int(tf.config['auth_port'])
        self.conn = \
            S3Connection(aws_access_key, aws_secret_key, is_secure=False,
                         host=self.host, port=self.port,
                         calling_format=OrdinaryCallingFormat())
        self.conn.auth_region_name = 'US'

    def reset(self):
        """
        Reset all swift environment to keep clean. As a result by calling this
        method, we can assume the backend swift keeps no containers and no
        objects on this connection's account.
        """
        exceptions = []
        # Retry the whole sweep: deletions can race with listings, so loop
        # until a listing comes back empty or the retry budget is spent.
        for i in range(RETRY_COUNT):
            try:
                buckets = self.conn.get_all_buckets()
                if not buckets:
                    # Account is clean; stop retrying.
                    break

                for bucket in buckets:
                    try:
                        # Abort pending multipart uploads first so their
                        # parts do not block the bucket delete below.
                        for upload in bucket.list_multipart_uploads():
                            upload.cancel_upload()

                        for obj in bucket.list():
                            bucket.delete_key(obj.name)

                        self.conn.delete_bucket(bucket.name)
                    except S3ResponseError as e:
                        # 404 means NoSuchBucket, NoSuchKey, or NoSuchUpload
                        if e.status != 404:
                            raise
            except (BotoClientError, S3ResponseError) as e:
                # Remember the failure but keep retrying; only give up
                # after the loop completes.
                exceptions.append(e)
        if exceptions:
            # raise the first exception
            raise exceptions.pop(0)

    def make_request(self, method, bucket='', obj='', headers=None, body='',
                     query=None):
        """
        Wrapper method of S3Connection.make_request.

        :param method: a string of HTTP request method
        :param bucket: a string of bucket name
        :param obj: a string of object name
        :param headers: a dictionary of headers
        :param body: a string of data binary sent to S3 as a request body
        :param query: a string of HTTP query argument

        :returns: a tuple of (int(status_code), headers dict, response body)
        """
        response = \
            self.conn.make_request(method, bucket=bucket, key=obj,
                                   headers=headers, data=body,
                                   query_args=query, sender=None,
                                   override_num_retries=RETRY_COUNT,
                                   retry_handler=None)
        return response.status, dict(response.getheaders()), response.read()

    def generate_url_and_headers(self, method, bucket='', obj='',
                                 expires_in=3600):
        # Produce a pre-signed URL plus any headers the client must send
        # with it for the signature to validate.
        url = self.conn.generate_url(expires_in, method, bucket, obj)
        if os.environ.get('S3_USE_SIGV4') == "True":
            # V4 signatures are known-broken in boto, but we can work around it
            if url.startswith('https://'):
                url = 'http://' + url[8:]
            # NOTE(review): the port appears twice on purpose — boto doubles
            # the port in the Host header it signs for SigV4 (see
            # boto/boto#2623 and #3716), so the request must match.
            return url, {'Host': '%(host)s:%(port)d:%(port)d' % {
                'host': self.host, 'port': self.port}}
        return url, {}
|
||||
|
||||
|
||||
# TODO: make sure where this function is used
def get_admin_connection():
    """
    Return tester connection behaves as:
    user_test_admin = admin .admin
    """
    access_key = tf.config['s3_access_key']
    secret_key = tf.config['s3_secret_key']
    # The admin's access key doubles as the expected Owner ID in assertions.
    return Connection(access_key, secret_key, tf.config['s3_access_key'])
|
156
test/functional/s3api/test_acl.py
Normal file
156
test/functional/s3api/test_acl.py
Normal file
@ -0,0 +1,156 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest2
|
||||
import os
|
||||
import test.functional as tf
|
||||
from swift.common.middleware.s3api.etree import fromstring
|
||||
from test.functional.s3api import S3ApiBase
|
||||
from test.functional.s3api.s3_test_client import Connection
|
||||
from test.functional.s3api.utils import get_error_code
|
||||
|
||||
|
||||
def setUpModule():
    """Initialize the shared functional-test environment for this module."""
    tf.setup_package()
|
||||
|
||||
|
||||
def tearDownModule():
    """Tear down the shared functional-test environment for this module."""
    tf.teardown_package()
|
||||
|
||||
|
||||
class TestS3Acl(S3ApiBase):
    """
    Functional tests for the ACL sub-resource (``?acl``) on buckets and
    objects, including error cases with a second, unprivileged account.
    """

    def setUp(self):
        super(TestS3Acl, self).setUp()
        self.bucket = 'bucket'
        self.obj = 'object'
        # These tests need a second set of credentials to exercise
        # AccessDenied paths; skip when the config does not provide one.
        if 's3_access_key2' not in tf.config or \
                's3_secret_key2' not in tf.config:
            raise tf.SkipTest(
                'TestS3Acl requires s3_access_key2 and s3_secret_key2 setting')
        self.conn.make_request('PUT', self.bucket)
        access_key2 = tf.config['s3_access_key2']
        secret_key2 = tf.config['s3_secret_key2']
        # conn2: a different account that does not own self.bucket.
        self.conn2 = Connection(access_key2, secret_key2, access_key2)

    def test_acl(self):
        # Happy-path: PUT a canned bucket ACL, then read back bucket and
        # object ACL documents and check their ownership.
        self.conn.make_request('PUT', self.bucket, self.obj)
        query = 'acl'

        # PUT Bucket ACL
        headers = {'x-amz-acl': 'public-read'}
        status, headers, body = \
            self.conn.make_request('PUT', self.bucket, headers=headers,
                                   query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertEqual(headers['content-length'], '0')

        # GET Bucket ACL
        status, headers, body = \
            self.conn.make_request('GET', self.bucket, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        # TODO: Fix the response that last-modified must be in the response.
        # self.assertTrue(headers['last-modified'] is not None)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue(headers['content-type'] is not None)
        elem = fromstring(body, 'AccessControlPolicy')
        owner = elem.find('Owner')
        # The bucket owner must be reported as the creating account.
        self.assertEqual(owner.find('ID').text, self.conn.user_id)
        self.assertEqual(owner.find('DisplayName').text, self.conn.user_id)
        acl = elem.find('AccessControlList')
        self.assertTrue(acl.find('Grant') is not None)

        # GET Object ACL
        status, headers, body = \
            self.conn.make_request('GET', self.bucket, self.obj, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        # TODO: Fix the response that last-modified must be in the response.
        # self.assertTrue(headers['last-modified'] is not None)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue(headers['content-type'] is not None)
        elem = fromstring(body, 'AccessControlPolicy')
        owner = elem.find('Owner')
        self.assertEqual(owner.find('ID').text, self.conn.user_id)
        self.assertEqual(owner.find('DisplayName').text, self.conn.user_id)
        acl = elem.find('AccessControlList')
        self.assertTrue(acl.find('Grant') is not None)

    def test_put_bucket_acl_error(self):
        # Bad signature, missing bucket, and foreign account each fail with
        # their specific S3 error code.
        req_headers = {'x-amz-acl': 'public-read'}
        aws_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            aws_error_conn.make_request('PUT', self.bucket,
                                        headers=req_headers, query='acl')
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

        status, headers, body = \
            self.conn.make_request('PUT', 'nothing',
                                   headers=req_headers, query='acl')
        self.assertEqual(get_error_code(body), 'NoSuchBucket')

        status, headers, body = \
            self.conn2.make_request('PUT', self.bucket,
                                    headers=req_headers, query='acl')
        self.assertEqual(get_error_code(body), 'AccessDenied')

    def test_get_bucket_acl_error(self):
        # Same three failure modes as PUT, for reading a bucket ACL.
        aws_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            aws_error_conn.make_request('GET', self.bucket, query='acl')
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

        status, headers, body = \
            self.conn.make_request('GET', 'nothing', query='acl')
        self.assertEqual(get_error_code(body), 'NoSuchBucket')

        status, headers, body = \
            self.conn2.make_request('GET', self.bucket, query='acl')
        self.assertEqual(get_error_code(body), 'AccessDenied')

    def test_get_object_acl_error(self):
        # Object-level ACL reads: bad signature, missing key, foreign account.
        self.conn.make_request('PUT', self.bucket, self.obj)

        aws_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            aws_error_conn.make_request('GET', self.bucket, self.obj,
                                        query='acl')
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

        status, headers, body = \
            self.conn.make_request('GET', self.bucket, 'nothing', query='acl')
        self.assertEqual(get_error_code(body), 'NoSuchKey')

        status, headers, body = \
            self.conn2.make_request('GET', self.bucket, self.obj, query='acl')
        self.assertEqual(get_error_code(body), 'AccessDenied')
|
||||
|
||||
|
||||
class TestS3AclSigV4(TestS3Acl):
    """Repeat every TestS3Acl case with SigV4 request signing enabled."""

    @classmethod
    def setUpClass(cls):
        # boto switches to SigV4 when this environment variable is set.
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        # Remove the flag so later test classes sign with SigV2 again.
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        super(TestS3AclSigV4, self).setUp()
|
||||
|
||||
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
    unittest2.main()
|
487
test/functional/s3api/test_bucket.py
Normal file
487
test/functional/s3api/test_bucket.py
Normal file
@ -0,0 +1,487 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest2
|
||||
import os
|
||||
|
||||
import test.functional as tf
|
||||
from swift.common.middleware.s3api.etree import fromstring, tostring, Element, \
|
||||
SubElement
|
||||
from test.functional.s3api import S3ApiBase
|
||||
from test.functional.s3api.s3_test_client import Connection
|
||||
from test.functional.s3api.utils import get_error_code
|
||||
|
||||
|
||||
def setUpModule():
    """Initialize the shared functional-test environment for this module."""
    tf.setup_package()
|
||||
|
||||
|
||||
def tearDownModule():
    """Tear down the shared functional-test environment for this module."""
    tf.teardown_package()
|
||||
|
||||
|
||||
class TestS3ApiBucket(S3ApiBase):
|
||||
def setUp(self):
    # No extra fixtures beyond the common S3ApiBase connection/reset.
    super(TestS3ApiBucket, self).setUp()
|
||||
|
||||
def _gen_location_xml(self, location):
    """Serialize a CreateBucketConfiguration body for *location*."""
    root = Element('CreateBucketConfiguration')
    SubElement(root, 'LocationConstraint').text = location
    return tostring(root)
|
||||
|
||||
def test_bucket(self):
    """Full bucket lifecycle: PUT, GET (empty and populated), HEAD, DELETE."""
    bucket = 'bucket'
    max_bucket_listing = tf.cluster_info['s3api'].get(
        'max_bucket_listing', 1000)

    # PUT Bucket
    status, headers, body = self.conn.make_request('PUT', bucket)
    self.assertEqual(status, 200)

    self.assertCommonResponseHeaders(headers)
    self.assertIn(headers['location'], (
        '/' + bucket,  # swob won't touch it...
        # but webob (which we get because of auth_token) *does*
        'http://%s%s/%s' % (
            self.conn.host,
            '' if self.conn.port == 80 else ':%d' % self.conn.port,
            bucket),
        # This is all based on the Host header the client provided,
        # and boto will double-up ports for sig v4. See
        # - https://github.com/boto/boto/issues/2623
        # - https://github.com/boto/boto/issues/3716
        # with proposed fixes at
        # - https://github.com/boto/boto/pull/3513
        # - https://github.com/boto/boto/pull/3676
        'http://%s%s:%d/%s' % (
            self.conn.host,
            '' if self.conn.port == 80 else ':%d' % self.conn.port,
            self.conn.port,
            bucket),
    ))
    self.assertEqual(headers['content-length'], '0')

    # GET Bucket(Without Object)
    status, headers, body = self.conn.make_request('GET', bucket)
    self.assertEqual(status, 200)

    self.assertCommonResponseHeaders(headers)
    self.assertIsNotNone(headers['content-type'])
    self.assertEqual(headers['content-length'], str(len(body)))
    # TODO; requires consideration
    # self.assertEqual(headers['transfer-encoding'], 'chunked')

    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('Name').text, bucket)
    self.assertIsNone(elem.find('Prefix').text)
    self.assertIsNone(elem.find('Marker').text)
    self.assertEqual(
        elem.find('MaxKeys').text, str(max_bucket_listing))
    self.assertEqual(elem.find('IsTruncated').text, 'false')
    objects = elem.findall('./Contents')
    self.assertEqual(list(objects), [])

    # GET Bucket(With Object)
    req_objects = ('object', 'object2')
    for obj in req_objects:
        self.conn.make_request('PUT', bucket, obj)
    status, headers, body = self.conn.make_request('GET', bucket)
    self.assertEqual(status, 200)

    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('Name').text, bucket)
    self.assertIsNone(elem.find('Prefix').text)
    self.assertIsNone(elem.find('Marker').text)
    self.assertEqual(elem.find('MaxKeys').text,
                     str(max_bucket_listing))
    self.assertEqual(elem.find('IsTruncated').text, 'false')
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), 2)
    for o in resp_objects:
        self.assertIn(o.find('Key').text, req_objects)
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertIsNotNone(o.find('ETag').text)
        self.assertIsNotNone(o.find('Size').text)
        self.assertIsNotNone(o.find('StorageClass').text)
        # Bug fix: assertTrue(x, expected) treated the expected value as
        # the failure *message* and never compared it; use assertEqual.
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)

    # HEAD Bucket
    status, headers, body = self.conn.make_request('HEAD', bucket)
    self.assertEqual(status, 200)

    self.assertCommonResponseHeaders(headers)
    self.assertIsNotNone(headers['content-type'])
    self.assertEqual(headers['content-length'], str(len(body)))
    # TODO; requires consideration
    # self.assertEqual(headers['transfer-encoding'], 'chunked')

    # DELETE Bucket
    for obj in req_objects:
        self.conn.make_request('DELETE', bucket, obj)
    status, headers, body = self.conn.make_request('DELETE', bucket)
    self.assertEqual(status, 204)

    self.assertCommonResponseHeaders(headers)
|
||||
|
||||
def test_put_bucket_error(self):
    """PUT Bucket failures: invalid name, bad signature, duplicate name."""
    # An illegal character in the bucket name is rejected outright.
    status, headers, body = \
        self.conn.make_request('PUT', 'bucket+invalid')
    self.assertEqual(get_error_code(body), 'InvalidBucketName')

    # A wrong secret key must fail signature validation.
    bad_auth_conn = Connection(aws_secret_key='invalid')
    status, headers, body = bad_auth_conn.make_request('PUT', 'bucket')
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

    # Creating the same bucket a second time reports a conflict.
    self.conn.make_request('PUT', 'bucket')
    status, headers, body = self.conn.make_request('PUT', 'bucket')
    self.assertEqual(get_error_code(body), 'BucketAlreadyExists')
|
||||
|
||||
def test_put_bucket_with_LocationConstraint(self):
    """A PUT carrying a LocationConstraint body for 'US' succeeds."""
    payload = self._gen_location_xml('US')
    status, headers, body = \
        self.conn.make_request('PUT', 'bucket', body=payload)
    self.assertEqual(status, 200)
|
||||
|
||||
def test_get_bucket_error(self):
    """GET Bucket failures: invalid name, bad signature, missing bucket."""
    self.conn.make_request('PUT', 'bucket')

    status, headers, body = \
        self.conn.make_request('GET', 'bucket+invalid')
    self.assertEqual(get_error_code(body), 'InvalidBucketName')

    bad_auth_conn = Connection(aws_secret_key='invalid')
    status, headers, body = bad_auth_conn.make_request('GET', 'bucket')
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

    status, headers, body = self.conn.make_request('GET', 'nothing')
    self.assertEqual(get_error_code(body), 'NoSuchBucket')
|
||||
|
||||
def _prepare_test_get_bucket(self, bucket, objects):
    """Create *bucket* and populate it with the given object names."""
    self.conn.make_request('PUT', bucket)
    for name in objects:
        self.conn.make_request('PUT', bucket, name)
|
||||
|
||||
def test_get_bucket_with_delimiter(self):
    """delimiter=/ splits keys into Contents and CommonPrefixes."""
    bucket = 'bucket'
    put_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
                   'dir/subdir/object')
    self._prepare_test_get_bucket(bucket, put_objects)

    delimiter = '/'
    query = 'delimiter=%s' % delimiter
    expect_objects = ('object', 'object2')
    expect_prefixes = ('dir/', 'subdir/', 'subdir2/')
    status, headers, body = \
        self.conn.make_request('GET', bucket, query=query)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('Delimiter').text, delimiter)
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), len(expect_objects))
    for i, o in enumerate(resp_objects):
        self.assertEqual(o.find('Key').text, expect_objects[i])
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertIsNotNone(o.find('ETag').text)
        self.assertIsNotNone(o.find('Size').text)
        self.assertEqual(o.find('StorageClass').text, 'STANDARD')
        # Bug fix: assertTrue(x, expected) treated the expected value as
        # the failure *message* and never compared it; use assertEqual.
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)
    resp_prefixes = elem.findall('CommonPrefixes')
    self.assertEqual(len(resp_prefixes), len(expect_prefixes))
    for i, p in enumerate(resp_prefixes):
        self.assertEqual(p.find('./Prefix').text, expect_prefixes[i])
|
||||
|
||||
def test_get_bucket_with_encoding_type(self):
    """encoding-type=url is echoed back in the listing response."""
    bucket = 'bucket'
    self._prepare_test_get_bucket(bucket, ('object', 'object2'))

    encoding_type = 'url'
    status, headers, body = self.conn.make_request(
        'GET', bucket, query='encoding-type=%s' % encoding_type)
    self.assertEqual(status, 200)
    listing = fromstring(body, 'ListBucketResult')
    self.assertEqual(listing.find('EncodingType').text, encoding_type)
|
||||
|
||||
def test_get_bucket_with_marker(self):
    """marker=object returns only keys sorting after the marker."""
    bucket = 'bucket'
    put_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
                   'dir/subdir/object')
    self._prepare_test_get_bucket(bucket, put_objects)

    marker = 'object'
    query = 'marker=%s' % marker
    expect_objects = ('object2', 'subdir/object', 'subdir2/object')
    status, headers, body = \
        self.conn.make_request('GET', bucket, query=query)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('Marker').text, marker)
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), len(expect_objects))
    for i, o in enumerate(resp_objects):
        self.assertEqual(o.find('Key').text, expect_objects[i])
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertIsNotNone(o.find('ETag').text)
        self.assertIsNotNone(o.find('Size').text)
        self.assertEqual(o.find('StorageClass').text, 'STANDARD')
        # Bug fix: assertTrue(x, expected) treated the expected value as
        # the failure *message* and never compared it; use assertEqual.
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)
|
||||
|
||||
def test_get_bucket_with_max_keys(self):
    """max-keys=2 truncates the listing to the first two keys."""
    bucket = 'bucket'
    put_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
                   'dir/subdir/object')
    self._prepare_test_get_bucket(bucket, put_objects)

    max_keys = '2'
    query = 'max-keys=%s' % max_keys
    expect_objects = ('dir/subdir/object', 'object')
    status, headers, body = \
        self.conn.make_request('GET', bucket, query=query)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('MaxKeys').text, max_keys)
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), len(expect_objects))
    for i, o in enumerate(resp_objects):
        self.assertEqual(o.find('Key').text, expect_objects[i])
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertIsNotNone(o.find('ETag').text)
        self.assertIsNotNone(o.find('Size').text)
        self.assertEqual(o.find('StorageClass').text, 'STANDARD')
        # Bug fix: assertTrue(x, expected) treated the expected value as
        # the failure *message* and never compared it; use assertEqual.
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)
|
||||
|
||||
def test_get_bucket_with_prefix(self):
    """prefix=object returns only keys starting with the prefix."""
    bucket = 'bucket'
    req_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
                   'dir/subdir/object')
    self._prepare_test_get_bucket(bucket, req_objects)

    prefix = 'object'
    query = 'prefix=%s' % prefix
    expect_objects = ('object', 'object2')
    status, headers, body = \
        self.conn.make_request('GET', bucket, query=query)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('Prefix').text, prefix)
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), len(expect_objects))
    for i, o in enumerate(resp_objects):
        self.assertEqual(o.find('Key').text, expect_objects[i])
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertIsNotNone(o.find('ETag').text)
        self.assertIsNotNone(o.find('Size').text)
        self.assertEqual(o.find('StorageClass').text, 'STANDARD')
        # Bug fix: assertTrue(x, expected) treated the expected value as
        # the failure *message* and never compared it; use assertEqual.
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)
|
||||
|
||||
def test_get_bucket_v2_with_start_after(self):
    # ListObjectsV2: start-after behaves like v1's marker, and Owner is
    # omitted unless fetch-owner=true is supplied.
    bucket = 'bucket'
    put_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
                   'dir/subdir/object')
    self._prepare_test_get_bucket(bucket, put_objects)

    marker = 'object'
    query = 'list-type=2&start-after=%s' % marker
    # Only keys sorting strictly after 'object' are expected back.
    expect_objects = ('object2', 'subdir/object', 'subdir2/object')
    status, headers, body = \
        self.conn.make_request('GET', bucket, query=query)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('StartAfter').text, marker)
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), len(expect_objects))
    for i, o in enumerate(resp_objects):
        self.assertEqual(o.find('Key').text, expect_objects[i])
        self.assertTrue(o.find('LastModified').text is not None)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertTrue(o.find('ETag').text is not None)
        self.assertTrue(o.find('Size').text is not None)
        self.assertEqual(o.find('StorageClass').text, 'STANDARD')
        # Without fetch-owner, v2 listings must not include Owner data.
        self.assertIsNone(o.find('Owner/ID'))
        self.assertIsNone(o.find('Owner/DisplayName'))
|
||||
|
||||
def test_get_bucket_v2_with_fetch_owner(self):
    """ListObjectsV2 with fetch-owner=true includes Owner in each entry."""
    bucket = 'bucket'
    put_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
                   'dir/subdir/object')
    self._prepare_test_get_bucket(bucket, put_objects)

    query = 'list-type=2&fetch-owner=true'
    expect_objects = ('dir/subdir/object', 'object', 'object2',
                      'subdir/object', 'subdir2/object')
    status, headers, body = \
        self.conn.make_request('GET', bucket, query=query)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('KeyCount').text, '5')
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), len(expect_objects))
    for i, o in enumerate(resp_objects):
        self.assertEqual(o.find('Key').text, expect_objects[i])
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertIsNotNone(o.find('ETag').text)
        self.assertIsNotNone(o.find('Size').text)
        self.assertEqual(o.find('StorageClass').text, 'STANDARD')
        # Bug fix: assertTrue(x, expected) treated the expected value as
        # the failure *message* and never compared it; use assertEqual.
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)
|
||||
|
||||
def test_get_bucket_v2_with_continuation_token(self):
    """GET bucket (ListObjectsV2): paginate via continuation-token."""
    bucket = 'bucket'
    put_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
                   'dir/subdir/object')
    self._prepare_test_get_bucket(bucket, put_objects)

    def check_contents(result_elem, expected):
        # Verify the Contents entries; owner information must be
        # absent because fetch-owner was not requested.
        contents = result_elem.findall('./Contents')
        self.assertEqual(len(list(contents)), len(expected))
        for expected_key, entry in zip(expected, contents):
            self.assertEqual(entry.find('Key').text, expected_key)
            self.assertIsNotNone(entry.find('LastModified').text)
            self.assertRegexpMatches(
                entry.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertIsNotNone(entry.find('ETag').text)
            self.assertIsNotNone(entry.find('Size').text)
            self.assertEqual(entry.find('StorageClass').text, 'STANDARD')
            self.assertIsNone(entry.find('Owner/ID'))
            self.assertIsNone(entry.find('Owner/DisplayName'))

    # First page: 3 of 5 keys, so the listing is truncated and must
    # provide a NextContinuationToken.
    status, headers, body = \
        self.conn.make_request('GET', bucket, query='list-type=2&max-keys=3')
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('MaxKeys').text, '3')
    self.assertEqual(elem.find('KeyCount').text, '3')
    self.assertEqual(elem.find('IsTruncated').text, 'true')
    next_cont_token_elem = elem.find('NextContinuationToken')
    self.assertIsNotNone(next_cont_token_elem)
    check_contents(elem, ('dir/subdir/object', 'object', 'object2'))

    # Second page: the remaining 2 keys; listing is complete and the
    # request token is echoed back as ContinuationToken.
    status, headers, body = \
        self.conn.make_request(
            'GET', bucket,
            query='list-type=2&max-keys=3&continuation-token=%s'
                  % next_cont_token_elem.text)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')
    self.assertEqual(elem.find('MaxKeys').text, '3')
    self.assertEqual(elem.find('KeyCount').text, '2')
    self.assertEqual(elem.find('IsTruncated').text, 'false')
    self.assertIsNone(elem.find('NextContinuationToken'))
    cont_token_elem = elem.find('ContinuationToken')
    self.assertEqual(cont_token_elem.text, next_cont_token_elem.text)
    check_contents(elem, ('subdir/object', 'subdir2/object'))
||||
def test_head_bucket_error(self):
    """HEAD bucket error cases: each one must return an empty body."""
    self.conn.make_request('PUT', 'bucket')
    bad_auth_conn = Connection(aws_secret_key='invalid')

    # (connection, bucket name, expected HTTP status)
    cases = [
        (self.conn, 'bucket+invalid', 400),   # malformed bucket name
        (bad_auth_conn, 'bucket', 403),       # bad credentials
        (self.conn, 'nothing', 404),          # bucket does not exist
    ]
    for conn, name, expected_status in cases:
        status, headers, body = conn.make_request('HEAD', name)
        self.assertEqual(status, expected_status)
        self.assertEqual(body, '')  # sanity: HEAD carries no body
||||
def test_delete_bucket_error(self):
    """DELETE bucket error cases mapped to S3 error codes."""
    bad_auth_conn = Connection(aws_secret_key='invalid')

    # (connection, bucket name, expected S3 error code)
    cases = [
        (self.conn, 'bucket+invalid', 'InvalidBucketName'),
        (bad_auth_conn, 'bucket', 'SignatureDoesNotMatch'),
        (self.conn, 'bucket', 'NoSuchBucket'),
    ]
    for conn, name, expected_code in cases:
        status, headers, body = conn.make_request('DELETE', name)
        self.assertEqual(get_error_code(body), expected_code)
||||
def test_bucket_invalid_method_error(self):
    """Unsupported HTTP verbs on a bucket must yield MethodNotAllowed."""
    # 'GETPUT' exists in no controller at all; '_delete_segments_bucket'
    # is a name on the controller object but not a public HTTP method,
    # so it must be rejected the same way.
    for verb in ('GETPUT', '_delete_segments_bucket'):
        status, headers, body = self.conn.make_request(verb, 'bucket')
        self.assertEqual(get_error_code(body), 'MethodNotAllowed')
|
||||
class TestS3ApiBucketSigV4(TestS3ApiBucket):
    """Re-run all bucket tests with AWS Signature Version 4 signing.

    The S3_USE_SIGV4 environment variable switches the test client to
    SigV4 request signing for every test in this class.
    """

    @classmethod
    def setUpClass(cls):
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        # Bug fix: super() must be given THIS class so that
        # TestS3ApiBucket.setUp() actually runs; the previous
        # super(TestS3ApiBucket, self).setUp() skipped the parent's
        # setUp and jumped straight to its base class.
        super(TestS3ApiBucketSigV4, self).setUp()
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
248
test/functional/s3api/test_multi_delete.py
Normal file
248
test/functional/s3api/test_multi_delete.py
Normal file
@ -0,0 +1,248 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest2
|
||||
import os
|
||||
import test.functional as tf
|
||||
from swift.common.middleware.s3api.etree import fromstring, tostring, Element, \
|
||||
SubElement
|
||||
from swift.common.middleware.s3api.controllers.multi_delete import \
|
||||
MAX_MULTI_DELETE_BODY_SIZE
|
||||
|
||||
from test.functional.s3api import S3ApiBase
|
||||
from test.functional.s3api.s3_test_client import Connection
|
||||
from test.functional.s3api.utils import get_error_code, calculate_md5
|
||||
|
||||
|
||||
def setUpModule():
    """Bring up the shared functional-test environment for this module."""
    tf.setup_package()
|
||||
|
||||
def tearDownModule():
    """Tear down the shared functional-test environment."""
    tf.teardown_package()
||||
|
||||
|
||||
class TestS3ApiMultiDelete(S3ApiBase):
    """Functional tests for the S3 Multi-Object Delete (POST ?delete) API.

    Bug fix in test_delete_multi_objects_error: the over-limit request
    built its key list as ``['obj%s' for var in xrange(1001)]`` —
    missing ``% var`` — which produced 1001 copies of the literal
    string 'obj%s' instead of 1001 distinct keys.  The count (and thus
    the expected MalformedXML error) was the same, but the request no
    longer matched the test's intent; the interpolation is restored.
    """

    def setUp(self):
        super(TestS3ApiMultiDelete, self).setUp()

    def _prepare_test_delete_multi_objects(self, bucket, objects):
        # Create the bucket and populate it with zero-byte objects.
        self.conn.make_request('PUT', bucket)
        for obj in objects:
            self.conn.make_request('PUT', bucket, obj)

    def _gen_multi_delete_xml(self, objects, quiet=None):
        """Build a Delete request body listing *objects*; optional Quiet."""
        elem = Element('Delete')
        if quiet:
            SubElement(elem, 'Quiet').text = quiet
        for key in objects:
            obj = SubElement(elem, 'Object')
            SubElement(obj, 'Key').text = key

        return tostring(elem, use_s3ns=False)

    def _gen_invalid_multi_delete_xml(self, hasObjectTag=False):
        """Build a malformed Delete body: no Object tag, or an empty Key."""
        elem = Element('Delete')
        if hasObjectTag:
            obj = SubElement(elem, 'Object')
            SubElement(obj, 'Key').text = ''

        return tostring(elem, use_s3ns=False)

    def test_delete_multi_objects(self):
        bucket = 'bucket'
        put_objects = ['obj%s' % var for var in xrange(4)]
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        query = 'delete'

        # Delete an object via MultiDelete API
        req_objects = ['obj0']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue(headers['content-type'] is not None)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body)
        resp_objects = elem.findall('Deleted')
        self.assertEqual(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)

        # Delete 2 objects via MultiDelete API
        req_objects = ['obj1', 'obj2']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEqual(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)

        # Delete 2 objects via MultiDelete API but one (obj4) doesn't exist.
        req_objects = ['obj3', 'obj4']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        # S3 assumes a NoSuchKey object as deleted.
        self.assertEqual(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)

        # Delete 2 objects via MultiDelete API but no objects exist
        req_objects = ['obj4', 'obj5']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEqual(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)

    def test_delete_multi_objects_error(self):
        bucket = 'bucket'
        put_objects = ['obj']
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        xml = self._gen_multi_delete_xml(put_objects)
        content_md5 = calculate_md5(xml)
        query = 'delete'

        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('POST', bucket, body=xml,
                                         headers={
                                             'Content-MD5': content_md5
                                         },
                                         query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

        status, headers, body = \
            self.conn.make_request('POST', 'nothing', body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')

        # without Object tag
        xml = self._gen_invalid_multi_delete_xml()
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(get_error_code(body), 'MalformedXML')

        # without value of Key tag
        xml = self._gen_invalid_multi_delete_xml(hasObjectTag=True)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(get_error_code(body), 'UserKeyMustBeSpecified')

        # specified number of objects are over max_multi_delete_objects
        # (Default 1000), but xml size is smaller than 61365 bytes.
        # (Fixed: '% var' was missing, so all 1001 keys were identical.)
        req_objects = ['obj%s' % var for var in xrange(1001)]
        xml = self._gen_multi_delete_xml(req_objects)
        self.assertTrue(len(xml.encode('utf-8')) <= MAX_MULTI_DELETE_BODY_SIZE)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(get_error_code(body), 'MalformedXML')

        # specified xml size is over 61365 bytes, but number of objects are
        # smaller than max_multi_delete_objects.
        obj = 'a' * 1024
        req_objects = [obj + str(var) for var in xrange(999)]
        xml = self._gen_multi_delete_xml(req_objects)
        self.assertTrue(len(xml.encode('utf-8')) > MAX_MULTI_DELETE_BODY_SIZE)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(get_error_code(body), 'MalformedXML')

    def test_delete_multi_objects_with_quiet(self):
        bucket = 'bucket'
        put_objects = ['obj']
        query = 'delete'

        # with Quiet true: successful deletions are omitted from the result
        quiet = 'true'
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        xml = self._gen_multi_delete_xml(put_objects, quiet)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEqual(len(resp_objects), 0)

        # with Quiet false: each deletion is reported
        quiet = 'false'
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        xml = self._gen_multi_delete_xml(put_objects, quiet)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEqual(len(resp_objects), 1)
|
||||
|
||||
class TestS3ApiMultiDeleteSigV4(TestS3ApiMultiDelete):
    """Repeat the multi-delete tests using SigV4 request signing."""

    @classmethod
    def setUpClass(cls):
        # Tell the test client to sign requests with Signature V4.
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        super(TestS3ApiMultiDeleteSigV4, self).setUp()
||||
|
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
849
test/functional/s3api/test_multi_upload.py
Normal file
849
test/functional/s3api/test_multi_upload.py
Normal file
@ -0,0 +1,849 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
import unittest2
|
||||
import os
|
||||
import boto
|
||||
|
||||
# For an issue with venv and distutils, disable pylint message here
|
||||
# pylint: disable-msg=E0611,F0401
|
||||
from distutils.version import StrictVersion
|
||||
|
||||
from hashlib import md5
|
||||
from itertools import izip, izip_longest
|
||||
|
||||
import test.functional as tf
|
||||
from swift.common.middleware.s3api.etree import fromstring, tostring, Element, \
|
||||
SubElement
|
||||
from swift.common.middleware.s3api.utils import mktime
|
||||
|
||||
from test.functional.s3api import S3ApiBase
|
||||
from test.functional.s3api.s3_test_client import Connection
|
||||
from test.functional.s3api.utils import get_error_code, get_error_msg
|
||||
|
||||
|
||||
def setUpModule():
    """Bring up the shared functional-test environment for this module."""
    tf.setup_package()
||||
|
||||
|
||||
def tearDownModule():
    """Tear down the shared functional-test environment."""
    tf.teardown_package()
||||
|
||||
|
||||
class TestS3ApiMultiUpload(S3ApiBase):
|
||||
def setUp(self):
    """Skip unless the cluster advertises multipart upload support.

    Also caches the cluster's minimum part size for use by the tests.
    """
    super(TestS3ApiMultiUpload, self).setUp()
    if not tf.cluster_info['s3api'].get('allow_multipart_uploads', False):
        # Fixed typo in the user-visible skip message
        # ('enebled' -> 'enabled').
        raise tf.SkipTest('multipart upload is not enabled')

    # Smallest allowed size for a non-final part (S3 default: 5 MiB).
    self.min_segment_size = int(tf.cluster_info['s3api'].get(
        'min_segment_size', 5242880))
||||
def _gen_comp_xml(self, etags):
    """Build a CompleteMultipartUpload body from a list of part ETags.

    Part numbers are assigned from the list order, starting at 1.
    """
    root = Element('CompleteMultipartUpload')
    for part_number, etag in enumerate(etags, 1):
        part = SubElement(root, 'Part')
        SubElement(part, 'PartNumber').text = str(part_number)
        SubElement(part, 'ETag').text = etag
    return tostring(root)
||||
def _initiate_multi_uploads_result_generator(self, bucket, keys,
                                             headers=None, trials=1):
    """Yield (status, headers, body) for each initiated upload.

    Initiates a multipart upload for every key (optionally repeated
    *trials* times, with per-key extra request headers).
    """
    if headers is None:
        headers = [None] * len(keys)
    self.conn.make_request('PUT', bucket)
    for key, key_headers in izip_longest(keys, headers):
        for _ in xrange(trials):
            # make_request already returns the (status, headers, body)
            # triple, so it can be yielded directly.
            yield self.conn.make_request('POST', bucket, key,
                                         headers=key_headers,
                                         query='uploads')
||||
def _upload_part(self, bucket, key, upload_id, content=None, part_num=1):
    """PUT one part; defaults to a minimum-size body of 'a' bytes."""
    if not content:
        content = 'a' * self.min_segment_size
    return self.conn.make_request(
        'PUT', bucket, key, body=content,
        query='partNumber=%s&uploadId=%s' % (part_num, upload_id))
|
||||
def _upload_part_copy(self, src_bucket, src_obj, dst_bucket, dst_key,
                      upload_id, part_num=1, src_range=None):
    """Issue an Upload Part Copy; return (status, headers, body, etag)."""
    copy_headers = {'X-Amz-Copy-Source': '%s/%s' % (src_bucket, src_obj)}
    if src_range:
        copy_headers['X-Amz-Copy-Source-Range'] = src_range

    status, headers, body = \
        self.conn.make_request(
            'PUT', dst_bucket, dst_key, headers=copy_headers,
            query='partNumber=%s&uploadId=%s' % (part_num, upload_id))
    # The copied part's ETag comes back quoted in the XML body.
    elem = fromstring(body, 'CopyPartResult')
    etag = elem.find('ETag').text.strip('"')
    return status, headers, body, etag
|
||||
def _complete_multi_upload(self, bucket, key, upload_id, xml):
    """POST the CompleteMultipartUpload body; return the raw response."""
    return self.conn.make_request('POST', bucket, key, body=xml,
                                  query='uploadId=%s' % upload_id)
||||
def test_object_multi_upload(self):
    """End-to-end multipart upload lifecycle.

    Covers: initiate (x3), list uploads, upload part, upload part
    copy, list parts, abort, and complete.

    Fix: the comment above the part-count check after Upload Part
    Copy claimed "exactly two parts" while the assertion (correctly)
    expects one — only a single part was copied into uploads[1].
    """
    bucket = 'bucket'
    keys = ['obj1', 'obj2', 'obj3']
    headers = [None,
               {'Content-MD5': base64.b64encode('a' * 16).strip()},
               {'Etag': 'nonsense'}]
    uploads = []

    results_generator = self._initiate_multi_uploads_result_generator(
        bucket, keys, headers=headers)

    # Initiate Multipart Upload
    for expected_key, (status, headers, body) in \
            izip(keys, results_generator):
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        key = elem.find('Key').text
        self.assertEqual(expected_key, key)
        upload_id = elem.find('UploadId').text
        self.assertTrue(upload_id is not None)
        self.assertTrue((key, upload_id) not in uploads)
        uploads.append((key, upload_id))

    self.assertEqual(len(uploads), len(keys))  # sanity

    # List Multipart Uploads
    query = 'uploads'
    status, headers, body = \
        self.conn.make_request('GET', bucket, query=query)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-type'], 'application/xml')
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], str(len(body)))
    elem = fromstring(body, 'ListMultipartUploadsResult')
    self.assertEqual(elem.find('Bucket').text, bucket)
    self.assertIsNone(elem.find('KeyMarker').text)
    self.assertEqual(elem.find('NextKeyMarker').text, uploads[-1][0])
    self.assertIsNone(elem.find('UploadIdMarker').text)
    self.assertEqual(elem.find('NextUploadIdMarker').text, uploads[-1][1])
    self.assertEqual(elem.find('MaxUploads').text, '1000')
    self.assertTrue(elem.find('EncodingType') is None)
    self.assertEqual(elem.find('IsTruncated').text, 'false')
    self.assertEqual(len(elem.findall('Upload')), 3)
    for (expected_key, expected_upload_id), u in \
            izip(uploads, elem.findall('Upload')):
        key = u.find('Key').text
        upload_id = u.find('UploadId').text
        self.assertEqual(expected_key, key)
        self.assertEqual(expected_upload_id, upload_id)
        self.assertEqual(u.find('Initiator/ID').text,
                         self.conn.user_id)
        self.assertEqual(u.find('Initiator/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(u.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(u.find('Owner/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(u.find('StorageClass').text, 'STANDARD')
        self.assertTrue(u.find('Initiated').text is not None)

    # Upload Part
    key, upload_id = uploads[0]
    content = 'a' * self.min_segment_size
    etag = md5(content).hexdigest()
    status, headers, body = \
        self._upload_part(bucket, key, upload_id, content)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers, etag)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], '0')
    expected_parts_list = [(headers['etag'], mktime(headers['date']))]

    # Upload Part Copy
    key, upload_id = uploads[1]
    src_bucket = 'bucket2'
    src_obj = 'obj3'
    src_content = 'b' * self.min_segment_size
    etag = md5(src_content).hexdigest()

    # prepare src obj
    self.conn.make_request('PUT', src_bucket)
    self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
    _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
    self.assertCommonResponseHeaders(headers)

    status, headers, body, resp_etag = \
        self._upload_part_copy(src_bucket, src_obj, bucket,
                               key, upload_id)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-type'], 'application/xml')
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], str(len(body)))
    self.assertTrue('etag' not in headers)
    elem = fromstring(body, 'CopyPartResult')

    last_modified = elem.find('LastModified').text
    self.assertTrue(last_modified is not None)

    self.assertEqual(resp_etag, etag)

    # Check last-modified timestamp
    key, upload_id = uploads[1]
    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('GET', bucket, key, query=query)

    self.assertEqual(200, status)
    elem = fromstring(body, 'ListPartsResult')

    # FIXME: COPY result drops milli/microseconds but GET doesn't
    last_modified_gets = [p.find('LastModified').text
                          for p in elem.iterfind('Part')]
    self.assertEqual(
        last_modified_gets[0].rsplit('.', 1)[0],
        last_modified.rsplit('.', 1)[0],
        '%r != %r' % (last_modified_gets[0], last_modified))
    # There should be *exactly* one part (the one copied above)
    self.assertEqual(1, len(last_modified_gets))

    # List Parts
    key, upload_id = uploads[0]
    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('GET', bucket, key, query=query)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-type'], 'application/xml')
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], str(len(body)))
    elem = fromstring(body, 'ListPartsResult')
    self.assertEqual(elem.find('Bucket').text, bucket)
    self.assertEqual(elem.find('Key').text, key)
    self.assertEqual(elem.find('UploadId').text, upload_id)
    self.assertEqual(elem.find('Initiator/ID').text, self.conn.user_id)
    self.assertEqual(elem.find('Initiator/DisplayName').text,
                     self.conn.user_id)
    self.assertEqual(elem.find('Owner/ID').text, self.conn.user_id)
    self.assertEqual(elem.find('Owner/DisplayName').text,
                     self.conn.user_id)
    self.assertEqual(elem.find('StorageClass').text, 'STANDARD')
    self.assertEqual(elem.find('PartNumberMarker').text, '0')
    self.assertEqual(elem.find('NextPartNumberMarker').text, '1')
    self.assertEqual(elem.find('MaxParts').text, '1000')
    self.assertEqual(elem.find('IsTruncated').text, 'false')
    self.assertEqual(len(elem.findall('Part')), 1)

    # etags will be used to generate xml for Complete Multipart Upload
    etags = []
    for (expected_etag, expected_date), p in \
            izip(expected_parts_list, elem.findall('Part')):
        last_modified = p.find('LastModified').text
        self.assertTrue(last_modified is not None)
        # TODO: sanity check
        # (kota_) How do we check the sanity?
        # the last-modified header drops milli-seconds info
        # by the constraint of the format.
        # For now, we can do either the format check or round check
        # last_modified_from_xml = mktime(last_modified)
        # self.assertEqual(expected_date,
        #                  last_modified_from_xml)
        self.assertEqual(expected_etag, p.find('ETag').text)
        self.assertEqual(self.min_segment_size, int(p.find('Size').text))
        etags.append(p.find('ETag').text)

    # Abort Multipart Uploads
    # note that uploads[1] has part data while uploads[2] does not
    for key, upload_id in uploads[1:]:
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, key, query=query)
        self.assertEqual(status, 204)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'],
                         'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')

    # Complete Multipart Upload
    key, upload_id = uploads[0]
    xml = self._gen_comp_xml(etags)
    status, headers, body = \
        self._complete_multi_upload(bucket, key, upload_id, xml)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-type'], 'application/xml')
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], str(len(body)))
    elem = fromstring(body, 'CompleteMultipartUploadResult')
    # TODO: use tf.config value
    self.assertEqual(
        'http://%s:%s/bucket/obj1' % (self.conn.host, self.conn.port),
        elem.find('Location').text)
    self.assertEqual(elem.find('Bucket').text, bucket)
    self.assertEqual(elem.find('Key').text, key)
    # TODO: confirm completed etag value
    self.assertTrue(elem.find('ETag').text is not None)
||||
def test_initiate_multi_upload_error(self):
    """POST ?uploads error cases: bad signature and missing bucket."""
    bucket = 'bucket'
    key = 'obj'
    self.conn.make_request('PUT', bucket)
    query = 'uploads'

    # A request signed with the wrong secret must be rejected.
    bad_auth_conn = Connection(aws_secret_key='invalid')
    status, headers, body = \
        bad_auth_conn.make_request('POST', bucket, key, query=query)
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

    # Initiating an upload in a nonexistent bucket fails.
    status, resp_headers, body = \
        self.conn.make_request('POST', 'nothing', key, query=query)
    self.assertEqual(get_error_code(body), 'NoSuchBucket')
||||
def test_list_multi_uploads_error(self):
    """GET ?uploads error cases: bad signature and missing bucket."""
    bucket = 'bucket'
    self.conn.make_request('PUT', bucket)
    query = 'uploads'

    bad_auth_conn = Connection(aws_secret_key='invalid')
    status, headers, body = \
        bad_auth_conn.make_request('GET', bucket, query=query)
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

    status, headers, body = \
        self.conn.make_request('GET', 'nothing', query=query)
    self.assertEqual(get_error_code(body), 'NoSuchBucket')
||||
def test_upload_part_error(self):
    """PUT part error cases: auth, missing bucket/upload, bad part num."""
    bucket = 'bucket'
    self.conn.make_request('PUT', bucket)
    key = 'obj'
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, query='uploads')
    elem = fromstring(body, 'InitiateMultipartUploadResult')
    upload_id = elem.find('UploadId').text

    def part_query(part_num, uid):
        return 'partNumber=%s&uploadId=%s' % (part_num, uid)

    # Wrong secret key.
    bad_auth_conn = Connection(aws_secret_key='invalid')
    status, headers, body = \
        bad_auth_conn.make_request('PUT', bucket, key,
                                   query=part_query(1, upload_id))
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

    # Destination bucket does not exist.
    status, headers, body = \
        self.conn.make_request('PUT', 'nothing', key,
                               query=part_query(1, upload_id))
    self.assertEqual(get_error_code(body), 'NoSuchBucket')

    # Unknown upload id.
    status, headers, body = \
        self.conn.make_request('PUT', bucket, key,
                               query=part_query(1, 'nothing'))
    self.assertEqual(get_error_code(body), 'NoSuchUpload')

    # Part numbers start at 1, so 0 is rejected.
    status, headers, body = \
        self.conn.make_request('PUT', bucket, key,
                               query=part_query(0, upload_id))
    self.assertEqual(get_error_code(body), 'InvalidArgument')
    err_msg = 'Part number must be an integer between 1 and'
    self.assertTrue(err_msg in get_error_msg(body))
||||
def test_upload_part_copy_error(self):
    """Upload Part Copy must fail with the proper S3 error codes.

    Covers: bad signature, missing destination bucket, unknown upload
    id, and a copy source object that does not exist.
    """
    # Prepare a real source object to copy from.
    src_bucket = 'src'
    src_obj = 'src'
    self.conn.make_request('PUT', src_bucket)
    self.conn.make_request('PUT', src_bucket, src_obj)
    src_path = '%s/%s' % (src_bucket, src_obj)

    # Start a multipart upload in the destination bucket.
    bucket = 'bucket'
    self.conn.make_request('PUT', bucket)
    key = 'obj'
    query = 'uploads'
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, query=query)
    elem = fromstring(body, 'InitiateMultipartUploadResult')
    upload_id = elem.find('UploadId').text

    # Wrong secret key -> SignatureDoesNotMatch.
    query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
    auth_error_conn = Connection(aws_secret_key='invalid')
    status, headers, body = \
        auth_error_conn.make_request('PUT', bucket, key,
                                     headers={
                                         'X-Amz-Copy-Source': src_path
                                     },
                                     query=query)
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

    # Destination bucket does not exist -> NoSuchBucket.
    status, headers, body = \
        self.conn.make_request('PUT', 'nothing', key,
                               headers={'X-Amz-Copy-Source': src_path},
                               query=query)
    self.assertEqual(get_error_code(body), 'NoSuchBucket')

    # Unknown upload id -> NoSuchUpload.
    query = 'partNumber=%s&uploadId=%s' % (1, 'nothing')
    status, headers, body = \
        self.conn.make_request('PUT', bucket, key,
                               headers={'X-Amz-Copy-Source': src_path},
                               query=query)
    self.assertEqual(get_error_code(body), 'NoSuchUpload')

    # Source object does not exist -> NoSuchKey.
    src_path = '%s/%s' % (src_bucket, 'nothing')
    query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
    status, headers, body = \
        self.conn.make_request('PUT', bucket, key,
                               headers={'X-Amz-Copy-Source': src_path},
                               query=query)
    self.assertEqual(get_error_code(body), 'NoSuchKey')
def test_list_parts_error(self):
    """List Parts fails for bad auth, missing bucket, unknown upload."""
    bucket = 'bucket'
    key = 'obj'
    self.conn.make_request('PUT', bucket)

    _, _, init_body = self.conn.make_request(
        'POST', bucket, key, query='uploads')
    upload_id = fromstring(
        init_body, 'InitiateMultipartUploadResult').find('UploadId').text

    query = 'uploadId=%s' % upload_id

    # Wrong secret key.
    bad_conn = Connection(aws_secret_key='invalid')
    _, _, body = bad_conn.make_request('GET', bucket, key, query=query)
    self.assertEqual('SignatureDoesNotMatch', get_error_code(body))

    # Nonexistent bucket.
    _, _, body = self.conn.make_request('GET', 'nothing', key, query=query)
    self.assertEqual('NoSuchBucket', get_error_code(body))

    # Unknown upload id.
    _, _, body = self.conn.make_request(
        'GET', bucket, key, query='uploadId=%s' % 'nothing')
    self.assertEqual('NoSuchUpload', get_error_code(body))
def test_abort_multi_upload_error(self):
    """Abort Multipart Upload error cases."""
    bucket = 'bucket'
    key = 'obj'
    self.conn.make_request('PUT', bucket)

    _, _, init_body = self.conn.make_request(
        'POST', bucket, key, query='uploads')
    upload_id = fromstring(
        init_body, 'InitiateMultipartUploadResult').find('UploadId').text
    self._upload_part(bucket, key, upload_id)

    query = 'uploadId=%s' % upload_id

    # Wrong secret key.
    bad_conn = Connection(aws_secret_key='invalid')
    _, _, body = bad_conn.make_request('DELETE', bucket, key, query=query)
    self.assertEqual('SignatureDoesNotMatch', get_error_code(body))

    # Nonexistent bucket.
    _, _, body = self.conn.make_request(
        'DELETE', 'nothing', key, query=query)
    self.assertEqual('NoSuchBucket', get_error_code(body))

    # Valid upload id but for a different object key.
    _, _, body = self.conn.make_request(
        'DELETE', bucket, 'nothing', query=query)
    self.assertEqual('NoSuchUpload', get_error_code(body))

    # Unknown upload id.
    _, _, body = self.conn.make_request(
        'DELETE', bucket, key, query='uploadId=%s' % 'nothing')
    self.assertEqual('NoSuchUpload', get_error_code(body))
def test_complete_multi_upload_error(self):
    """Complete Multipart Upload must fail with the proper error codes.

    Exercises: undersized first part, bad signature, missing bucket,
    unknown upload id, XML without any Part element, an etag that
    matches no part, and an etag for a part never uploaded under this
    upload id.
    """
    bucket = 'bucket'
    keys = ['obj', 'obj2']
    self.conn.make_request('PUT', bucket)
    query = 'uploads'
    status, headers, body = \
        self.conn.make_request('POST', bucket, keys[0], query=query)
    elem = fromstring(body, 'InitiateMultipartUploadResult')
    upload_id = elem.find('UploadId').text

    # Upload two empty parts; both are below the minimum segment size.
    etags = []
    for i in xrange(1, 3):
        query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, keys[0], query=query)
        etags.append(headers['etag'])
    xml = self._gen_comp_xml(etags)

    # part 1 too small
    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('POST', bucket, keys[0], body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'EntityTooSmall')

    # invalid credentials
    auth_error_conn = Connection(aws_secret_key='invalid')
    status, headers, body = \
        auth_error_conn.make_request('POST', bucket, keys[0], body=xml,
                                     query=query)
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')

    # wrong/missing bucket
    status, headers, body = \
        self.conn.make_request('POST', 'nothing', keys[0], query=query)
    self.assertEqual(get_error_code(body), 'NoSuchBucket')

    # wrong upload ID
    query = 'uploadId=%s' % 'nothing'
    status, headers, body = \
        self.conn.make_request('POST', bucket, keys[0], body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'NoSuchUpload')

    # without Part tag in xml
    query = 'uploadId=%s' % upload_id
    xml = self._gen_comp_xml([])
    status, headers, body = \
        self.conn.make_request('POST', bucket, keys[0], body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'MalformedXML')

    # with invalid etag in xml
    invalid_etag = 'invalid'
    xml = self._gen_comp_xml([invalid_etag])
    status, headers, body = \
        self.conn.make_request('POST', bucket, keys[0], body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'InvalidPart')

    # without part in Swift: a fresh upload for keys[1] has no parts,
    # so completing with an etag from keys[0]'s upload must fail.
    query = 'uploads'
    status, headers, body = \
        self.conn.make_request('POST', bucket, keys[1], query=query)
    elem = fromstring(body, 'InitiateMultipartUploadResult')
    upload_id = elem.find('UploadId').text
    query = 'uploadId=%s' % upload_id
    xml = self._gen_comp_xml([etags[0]])
    status, headers, body = \
        self.conn.make_request('POST', bucket, keys[1], body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'InvalidPart')
def test_complete_upload_min_segment_size(self):
    """Every part except the last must be at least min_segment_size.

    Walks through the EntityTooSmall combinations: empty parts, all
    parts tiny, a single tiny part (allowed), a tiny non-last part
    (rejected), and a tiny last part (allowed).
    """
    bucket = 'bucket'
    key = 'obj'
    self.conn.make_request('PUT', bucket)
    query = 'uploads'
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, query=query)
    elem = fromstring(body, 'InitiateMultipartUploadResult')
    upload_id = elem.find('UploadId').text

    # multi parts with no body
    etags = []
    for i in xrange(1, 3):
        query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query)
        etags.append(headers['etag'])
    xml = self._gen_comp_xml(etags)

    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'EntityTooSmall')

    # multi parts with all parts less than min segment size
    etags = []
    for i in xrange(1, 3):
        query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query,
                                   body='AA')
        etags.append(headers['etag'])
    xml = self._gen_comp_xml(etags)

    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'EntityTooSmall')

    # one part and less than min segment size: the sole (i.e. last)
    # part may be arbitrarily small, so this completes successfully.
    etags = []
    query = 'partNumber=1&uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('PUT', bucket, key, query=query,
                               body='AA')
    etags.append(headers['etag'])
    xml = self._gen_comp_xml(etags)

    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, body=xml,
                               query=query)
    self.assertEqual(status, 200)

    # multi parts with all parts except the first part less than min
    # segment size
    query = 'uploads'
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, query=query)
    elem = fromstring(body, 'InitiateMultipartUploadResult')
    upload_id = elem.find('UploadId').text

    etags = []
    # NOTE: body_size is indexed by the 1-based part number; index 0 is
    # a padding dummy and is never used by the loop below.
    body_size = [self.min_segment_size, self.min_segment_size - 1, 2]
    for i in xrange(1, 3):
        query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query,
                                   body='A' * body_size[i])
        etags.append(headers['etag'])
    xml = self._gen_comp_xml(etags)

    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, body=xml,
                               query=query)
    self.assertEqual(get_error_code(body), 'EntityTooSmall')

    # multi parts with all parts except last part more than min segment
    # size
    query = 'uploads'
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, query=query)
    elem = fromstring(body, 'InitiateMultipartUploadResult')
    upload_id = elem.find('UploadId').text

    etags = []
    # Same 1-based indexing trick as above; a small *last* part is OK.
    body_size = [self.min_segment_size, self.min_segment_size, 2]
    for i in xrange(1, 3):
        query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query,
                                   body='A' * body_size[i])
        etags.append(headers['etag'])
    xml = self._gen_comp_xml(etags)

    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('POST', bucket, key, body=xml,
                               query=query)
    self.assertEqual(status, 200)
def test_complete_upload_with_fewer_etags(self):
    """Completing with only a subset of the uploaded parts succeeds."""
    bucket = 'bucket'
    key = 'obj'
    self.conn.make_request('PUT', bucket)

    _, _, init_body = self.conn.make_request(
        'POST', bucket, key, query='uploads')
    upload_id = fromstring(
        init_body, 'InitiateMultipartUploadResult').find('UploadId').text

    # Upload three 5 MB parts.
    etags = []
    for part_num in xrange(1, 4):
        _, part_headers, _ = self.conn.make_request(
            'PUT', bucket, key, body='A' * 1024 * 1024 * 5,
            query='partNumber=%s&uploadId=%s' % (part_num, upload_id))
        etags.append(part_headers['etag'])

    # Complete the upload listing only the first two of the three parts.
    xml = self._gen_comp_xml(etags[:-1])
    status, _, _ = self.conn.make_request(
        'POST', bucket, key, body=xml, query='uploadId=%s' % upload_id)
    self.assertEqual(200, status)
def test_object_multi_upload_part_copy_range(self):
    """Upload Part Copy with a Range header copies only that byte range.

    Initiates an upload, copies the first min_segment_size bytes of a
    source object as part 1, verifies the CopyPartResult and the
    timestamps reported by List Parts, then aborts the upload.
    """
    bucket = 'bucket'
    keys = ['obj1']
    uploads = []

    results_generator = self._initiate_multi_uploads_result_generator(
        bucket, keys)

    # Initiate Multipart Upload
    for expected_key, (status, headers, body) in \
            izip(keys, results_generator):
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        key = elem.find('Key').text
        self.assertEqual(expected_key, key)
        upload_id = elem.find('UploadId').text
        self.assertTrue(upload_id is not None)
        self.assertTrue((key, upload_id) not in uploads)
        uploads.append((key, upload_id))

    self.assertEqual(len(uploads), len(keys))  # sanity

    # Upload Part Copy Range
    key, upload_id = uploads[0]
    src_bucket = 'bucket2'
    src_obj = 'obj4'
    # NOTE: Python 2 integer division; source is 1.5x min_segment_size.
    src_content = 'y' * (self.min_segment_size / 2) + 'z' * \
        self.min_segment_size
    src_range = 'bytes=0-%d' % (self.min_segment_size - 1)
    # Expected etag is the md5 of exactly the copied byte range.
    etag = md5(src_content[:self.min_segment_size]).hexdigest()

    # prepare src obj
    self.conn.make_request('PUT', src_bucket)
    self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
    _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
    self.assertCommonResponseHeaders(headers)

    status, headers, body, resp_etag = \
        self._upload_part_copy(src_bucket, src_obj, bucket,
                               key, upload_id, 1, src_range)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-type'], 'application/xml')
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], str(len(body)))
    # The part's etag is reported in the XML body, not as a header.
    self.assertTrue('etag' not in headers)
    elem = fromstring(body, 'CopyPartResult')

    last_modified = elem.find('LastModified').text
    self.assertTrue(last_modified is not None)

    self.assertEqual(resp_etag, etag)

    # Check last-modified timestamp
    key, upload_id = uploads[0]
    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('GET', bucket, key, query=query)

    elem = fromstring(body, 'ListPartsResult')

    # FIXME: COPY result drops milli/microseconds but GET doesn't
    last_modified_gets = [p.find('LastModified').text
                          for p in elem.iterfind('Part')]
    self.assertEqual(
        last_modified_gets[0].rsplit('.', 1)[0],
        last_modified.rsplit('.', 1)[0],
        '%r != %r' % (last_modified_gets[0], last_modified))

    # There should be *exactly* one parts in the result
    self.assertEqual(1, len(last_modified_gets))

    # Abort Multipart Upload
    key, upload_id = uploads[0]
    query = 'uploadId=%s' % upload_id
    status, headers, body = \
        self.conn.make_request('DELETE', bucket, key, query=query)

    # sanity checks
    self.assertEqual(status, 204)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], '0')
class TestS3ApiMultiUploadSigV4(TestS3ApiMultiUpload):
    """Re-run the multipart upload tests using AWS Signature Version 4.

    The S3_USE_SIGV4 environment variable switches the test client's
    signing scheme for every inherited test.
    """

    @classmethod
    def setUpClass(cls):
        # Enable SigV4 signing for all tests in this class.
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        super(TestS3ApiMultiUploadSigV4, self).setUp()

    def test_object_multi_upload_part_copy_range(self):
        # NOTE(review): when boto >= 3.0 this override does nothing and
        # the parent test is NOT run — presumably intentional, since the
        # 'boto' package never reached 3.0; confirm before changing.
        if StrictVersion(boto.__version__) < StrictVersion('3.0'):
            self.skipTest('This stuff got the issue of boto<=2.x')

    # NOTE: 'exisiting' is a typo, but the method name is part of the
    # test suite's public surface (CI selects tests by name).
    def test_delete_bucket_multi_upload_object_exisiting(self):
        """A bucket holding a completed MPU object cannot be deleted."""
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []

        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)

        # Initiate Multipart Upload
        for expected_key, (status, _, body) in \
                izip(keys, results_generator):
            self.assertEqual(status, 200)  # sanity
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)  # sanity
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)  # sanity
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))

        self.assertEqual(len(uploads), len(keys))  # sanity

        # Upload Part
        key, upload_id = uploads[0]
        content = 'a' * self.min_segment_size
        status, headers, body = \
            self._upload_part(bucket, key, upload_id, content)
        self.assertEqual(status, 200)

        # Complete Multipart Upload
        key, upload_id = uploads[0]
        etags = [md5(content).hexdigest()]
        xml = self._gen_comp_xml(etags)
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)  # sanity

        # GET multipart object
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity

        # DELETE bucket while the object existing
        status, headers, body = \
            self.conn.make_request('DELETE', bucket)
        self.assertEqual(status, 409)  # sanity

        # The object must still be there.
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity
if __name__ == '__main__':
    # Allow running this functional-test module directly.
    unittest2.main()
873
test/functional/s3api/test_object.py
Normal file
873
test/functional/s3api/test_object.py
Normal file
@ -0,0 +1,873 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest2
|
||||
import os
|
||||
import boto
|
||||
|
||||
# For an issue with venv and distutils, disable pylint message here
|
||||
# pylint: disable-msg=E0611,F0401
|
||||
from distutils.version import StrictVersion
|
||||
|
||||
import email.parser
|
||||
from email.utils import formatdate, parsedate
|
||||
from time import mktime
|
||||
from hashlib import md5
|
||||
from urllib import quote
|
||||
|
||||
import test.functional as tf
|
||||
|
||||
from swift.common.middleware.s3api.etree import fromstring
|
||||
|
||||
from test.functional.s3api import S3ApiBase
|
||||
from test.functional.s3api.s3_test_client import Connection
|
||||
from test.functional.s3api.utils import get_error_code, calculate_md5
|
||||
|
||||
DAY = 86400.0  # seconds in one day: 60 * 60 * 24 (sec)
def setUpModule():
    # Standard swift functional-test package setup.
    tf.setup_package()
def tearDownModule():
    # Undo whatever tf.setup_package() established.
    tf.teardown_package()
class TestS3ApiObject(S3ApiBase):
|
||||
def setUp(self):
    """Create the bucket shared by every test in this class."""
    super(TestS3ApiObject, self).setUp()
    self.bucket = 'bucket'
    self.conn.make_request('PUT', self.bucket)
def _assertObjectEtag(self, bucket, obj, etag):
    """HEAD the object and check the common response headers carry *etag*."""
    status, headers, _ = self.conn.make_request('HEAD', bucket, obj)
    self.assertEqual(200, status)  # sanity
    self.assertCommonResponseHeaders(headers, etag)
def test_object(self):
    """Happy-path object lifecycle: PUT, Copy, GET, HEAD, DELETE.

    Note the ``headers`` local is reused: first as a request-header
    dict, then immediately rebound to the response headers.
    """
    obj = 'object name with %-sign'
    content = 'abc123'
    etag = md5(content).hexdigest()

    # PUT Object
    status, headers, body = \
        self.conn.make_request('PUT', self.bucket, obj, body=content)
    self.assertEqual(status, 200)

    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-length' in headers)  # sanity
    self.assertEqual(headers['content-length'], '0')
    self._assertObjectEtag(self.bucket, obj, etag)

    # PUT Object Copy
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_obj'
    self.conn.make_request('PUT', dst_bucket)
    headers = {'x-amz-copy-source': '/%s/%s' % (self.bucket, obj)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj,
                               headers=headers)
    self.assertEqual(status, 200)

    # PUT Object Copy with URL-encoded Source
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_obj'
    self.conn.make_request('PUT', dst_bucket)
    headers = {'x-amz-copy-source': quote('/%s/%s' % (self.bucket, obj))}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj,
                               headers=headers)
    self.assertEqual(status, 200)

    self.assertCommonResponseHeaders(headers)
    self.assertEqual(headers['content-length'], str(len(body)))

    # The copy result reports the source object's etag.
    elem = fromstring(body, 'CopyObjectResult')
    self.assertTrue(elem.find('LastModified').text is not None)
    last_modified_xml = elem.find('LastModified').text
    self.assertTrue(elem.find('ETag').text is not None)
    self.assertEqual(etag, elem.find('ETag').text.strip('"'))
    self._assertObjectEtag(dst_bucket, dst_obj, etag)

    # Check timestamp on Copy:
    status, headers, body = \
        self.conn.make_request('GET', dst_bucket)
    self.assertEqual(status, 200)
    elem = fromstring(body, 'ListBucketResult')

    # FIXME: COPY result drops milli/microseconds but GET doesn't
    self.assertEqual(
        elem.find('Contents').find("LastModified").text.rsplit('.', 1)[0],
        last_modified_xml.rsplit('.', 1)[0])

    # GET Object
    status, headers, body = \
        self.conn.make_request('GET', self.bucket, obj)
    self.assertEqual(status, 200)

    self.assertCommonResponseHeaders(headers, etag)
    self.assertTrue(headers['last-modified'] is not None)
    self.assertTrue(headers['content-type'] is not None)
    self.assertEqual(headers['content-length'], str(len(content)))

    # HEAD Object
    status, headers, body = \
        self.conn.make_request('HEAD', self.bucket, obj)
    self.assertEqual(status, 200)

    self.assertCommonResponseHeaders(headers, etag)
    self.assertTrue(headers['last-modified'] is not None)
    self.assertTrue('content-type' in headers)
    self.assertEqual(headers['content-length'], str(len(content)))

    # DELETE Object
    status, headers, body = \
        self.conn.make_request('DELETE', self.bucket, obj)
    self.assertEqual(status, 204)
    self.assertCommonResponseHeaders(headers)
def test_put_object_error(self):
    """PUT object fails for bad credentials and for a missing bucket."""
    # Wrong secret key.
    bad_conn = Connection(aws_secret_key='invalid')
    _, headers, body = bad_conn.make_request('PUT', self.bucket, 'object')
    self.assertEqual('SignatureDoesNotMatch', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])

    # Bucket that was never created.
    _, headers, body = self.conn.make_request('PUT', 'bucket2', 'object')
    self.assertEqual('NoSuchBucket', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])
def test_put_object_copy_error(self):
    """PUT Object Copy error cases: bad auth, bad source, bad target."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)
    dst_bucket = 'dst-bucket'
    self.conn.make_request('PUT', dst_bucket)
    dst_obj = 'dst_object'

    # Wrong secret key -> SignatureDoesNotMatch.
    headers = {'x-amz-copy-source': '/%s/%s' % (self.bucket, obj)}
    auth_error_conn = Connection(aws_secret_key='invalid')
    status, headers, body = \
        auth_error_conn.make_request('PUT', dst_bucket, dst_obj, headers)
    self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
    self.assertEqual(headers['content-type'], 'application/xml')

    # /src/nothing -> /dst/dst
    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, 'nothing')}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
    self.assertEqual(get_error_code(body), 'NoSuchKey')
    self.assertEqual(headers['content-type'], 'application/xml')

    # /nothing/src -> /dst/dst
    headers = {'X-Amz-Copy-Source': '/%s/%s' % ('nothing', obj)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
    # TODO: the source bucket is not checked, so no assertion yet.
    # self.assertEqual(get_error_code(body), 'NoSuchBucket')

    # /src/src -> /nothing/dst
    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj)}
    status, headers, body = \
        self.conn.make_request('PUT', 'nothing', dst_obj, headers)
    self.assertEqual(get_error_code(body), 'NoSuchBucket')
    self.assertEqual(headers['content-type'], 'application/xml')
def test_get_object_error(self):
    """GET object fails for bad auth, missing key and missing bucket."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    # Wrong secret key.
    bad_conn = Connection(aws_secret_key='invalid')
    _, headers, body = bad_conn.make_request('GET', self.bucket, obj)
    self.assertEqual('SignatureDoesNotMatch', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])

    # Key that was never uploaded.
    _, headers, body = self.conn.make_request(
        'GET', self.bucket, 'invalid')
    self.assertEqual('NoSuchKey', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])

    # Bucket that was never created.
    _, headers, body = self.conn.make_request('GET', 'invalid', obj)
    self.assertEqual('NoSuchBucket', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])
def test_head_object_error(self):
    """HEAD errors carry only a status code — never a body."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    # Wrong secret key -> 403.
    bad_conn = Connection(aws_secret_key='invalid')
    status, headers, body = bad_conn.make_request('HEAD', self.bucket, obj)
    self.assertEqual(403, status)
    self.assertEqual('', body)  # sanity
    self.assertEqual('application/xml', headers['content-type'])

    # Key that was never uploaded -> 404.
    status, headers, body = self.conn.make_request(
        'HEAD', self.bucket, 'invalid')
    self.assertEqual(404, status)
    self.assertEqual('', body)  # sanity
    self.assertEqual('application/xml', headers['content-type'])

    # Bucket that was never created -> 404.
    status, headers, body = self.conn.make_request('HEAD', 'invalid', obj)
    self.assertEqual(404, status)
    self.assertEqual('', body)  # sanity
    self.assertEqual('application/xml', headers['content-type'])
def test_delete_object_error(self):
    """DELETE object fails for bad auth, missing key, missing bucket."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    # Wrong secret key.
    bad_conn = Connection(aws_secret_key='invalid')
    _, headers, body = bad_conn.make_request('DELETE', self.bucket, obj)
    self.assertEqual('SignatureDoesNotMatch', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])

    # Key that was never uploaded.
    _, headers, body = self.conn.make_request(
        'DELETE', self.bucket, 'invalid')
    self.assertEqual('NoSuchKey', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])

    # Bucket that was never created.
    _, headers, body = self.conn.make_request('DELETE', 'invalid', obj)
    self.assertEqual('NoSuchBucket', get_error_code(body))
    self.assertEqual('application/xml', headers['content-type'])
def test_put_object_content_encoding(self):
    """Content-Encoding sent on PUT is returned on HEAD."""
    obj = 'object'
    etag = md5().hexdigest()  # etag of the empty body
    status, _, _ = self.conn.make_request(
        'PUT', self.bucket, obj, {'Content-Encoding': 'gzip'})
    self.assertEqual(200, status)

    _, headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    self.assertIn('content-encoding', headers)  # sanity
    self.assertEqual('gzip', headers['content-encoding'])
    self.assertCommonResponseHeaders(headers)
    self._assertObjectEtag(self.bucket, obj, etag)
def test_put_object_content_md5(self):
    """A correct Content-MD5 header is accepted on PUT."""
    obj = 'object'
    content = 'abcdefghij'
    etag = md5(content).hexdigest()
    put_headers = {'Content-MD5': calculate_md5(content)}
    status, headers, _ = self.conn.make_request(
        'PUT', self.bucket, obj, put_headers, content)
    self.assertEqual(200, status)
    self.assertCommonResponseHeaders(headers)
    self._assertObjectEtag(self.bucket, obj, etag)
def test_put_object_content_type(self):
    """Content-Type sent on PUT round-trips through HEAD."""
    obj = 'object'
    content = 'abcdefghij'
    etag = md5(content).hexdigest()
    status, _, _ = self.conn.make_request(
        'PUT', self.bucket, obj, {'Content-Type': 'text/plain'}, content)
    self.assertEqual(200, status)

    _, headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    self.assertEqual('text/plain', headers['content-type'])
    self.assertCommonResponseHeaders(headers)
    self._assertObjectEtag(self.bucket, obj, etag)
def test_put_object_conditional_requests(self):
    """Conditional PUT headers are answered with 501 Not Implemented."""
    obj = 'object'
    content = 'abcdefghij'
    # Each precondition header, in turn, must be rejected with 501.
    for cond_headers in (
            {'If-None-Match': '*'},
            {'If-Match': '*'},
            {'If-Modified-Since': 'Sat, 27 Jun 2015 00:00:00 GMT'},
            {'If-Unmodified-Since': 'Sat, 27 Jun 2015 00:00:00 GMT'}):
        status, _, _ = self.conn.make_request(
            'PUT', self.bucket, obj, cond_headers, content)
        self.assertEqual(501, status)

    # None of the above should actually have created an object
    status, _, _ = self.conn.make_request(
        'HEAD', self.bucket, obj, {}, '')
    self.assertEqual(404, status)
def test_put_object_expect(self):
    """PUT with 'Expect: 100-continue' completes normally."""
    obj = 'object'
    content = 'abcdefghij'
    etag = md5(content).hexdigest()
    status, headers, _ = self.conn.make_request(
        'PUT', self.bucket, obj, {'Expect': '100-continue'}, content)
    self.assertEqual(200, status)
    self.assertCommonResponseHeaders(headers)
    self._assertObjectEtag(self.bucket, obj, etag)
def _test_put_object_headers(self, req_headers, expected_headers=None):
    """PUT an object with *req_headers* and verify *expected_headers*
    (defaulting to the request headers) are returned on a later HEAD.
    """
    if expected_headers is None:
        expected_headers = req_headers
    obj = 'object'
    content = 'abcdefghij'
    etag = md5(content).hexdigest()

    status, _, _ = self.conn.make_request(
        'PUT', self.bucket, obj, req_headers, content)
    self.assertEqual(200, status)

    _, headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    for name, value in expected_headers.items():
        self.assertIn(name.lower(), headers)
        self.assertEqual(value, headers[name.lower()])
    self.assertCommonResponseHeaders(headers)
    self._assertObjectEtag(self.bucket, obj, etag)
def test_put_object_metadata(self):
    """Plain x-amz-meta-* headers round-trip through PUT then HEAD."""
    meta = {
        'X-Amz-Meta-Bar': 'foo',
        'X-Amz-Meta-Bar2': 'foo2',
    }
    self._test_put_object_headers(meta)
||||
def test_put_object_weird_metadata(self):
    """Metadata header names containing punctuation characters.

    Only a subset of the requested punctuation characters is expected
    back on HEAD — presumably the rest are dropped during header
    parsing somewhere along the path (TODO: confirm which layer drops
    them).
    """
    req_headers = dict(
        ('x-amz-meta-' + c, c)
        for c in '!"#$%&\'()*+-./<=>?@[\\]^`{|}~')
    # Expected survivors: a strict subset of the characters sent above.
    exp_headers = dict(
        ('x-amz-meta-' + c, c)
        for c in '!#$%&\'(*+-.^`|~')
    self._test_put_object_headers(req_headers, exp_headers)
||||
def test_put_object_underscore_in_metadata(self):
    """Underscores and hyphens in metadata names both survive the
    round trip.

    Kept separate from the other metadata tests for ease of testing
    against pre-0.19.0 eventlet.
    """
    self._test_put_object_headers({
        'X-Amz-Meta-Foo-Bar': 'baz',
        'X-Amz-Meta-Foo_Bar': 'also baz',
    })
||||
def test_put_object_content_headers(self):
    """Standard Content-* headers set on PUT come back on HEAD."""
    content_headers = {
        'Content-Type': 'foo/bar',
        'Content-Encoding': 'baz',
        'Content-Disposition': 'attachment',
        'Content-Language': 'en',
    }
    self._test_put_object_headers(content_headers)
||||
def test_put_object_cache_control(self):
    """Cache-Control, including a non-standard extension token,
    round-trips unchanged."""
    self._test_put_object_headers(
        {'Cache-Control': 'private, some-extension'})
||||
def test_put_object_expires(self):
    """The Expires header round-trips verbatim.

    The value is deliberately not a real HTTP-date: the server does
    not validate it.
    """
    self._test_put_object_headers(
        {'Expires': 'a valid HTTP-date timestamp'})
||||
def test_put_object_robots_tag(self):
    """An arbitrary non-amz header (X-Robots-Tag) round-trips."""
    self._test_put_object_headers(
        {'X-Robots-Tag': 'googlebot: noarchive'})
||||
def test_put_object_storage_class(self):
    """PUT with ``X-Amz-Storage-Class: STANDARD`` is accepted and the
    object is stored intact."""
    obj = 'object'
    content = 'abcdefghij'
    expected_etag = md5(content).hexdigest()

    status, resp_headers, body = self.conn.make_request(
        'PUT', self.bucket, obj,
        {'X-Amz-Storage-Class': 'STANDARD'}, content)

    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
    self._assertObjectEtag(self.bucket, obj, expected_etag)
||||
def test_put_object_copy_source_params(self):
    """Query parameters on X-Amz-Copy-Source.

    Only ``versionId`` is a valid copy-source query parameter; any
    other parameter (alone or mixed in) must be rejected with 400
    InvalidArgument, while ``versionId=null`` alone must copy
    successfully and preserve the source metadata and body.
    """
    obj = 'object'
    src_headers = {'X-Amz-Meta-Test': 'src'}
    src_body = 'some content'
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_object'
    self.conn.make_request('PUT', self.bucket, obj, src_headers, src_body)
    self.conn.make_request('PUT', dst_bucket)

    # Unknown query parameter -> InvalidArgument.
    headers = {'X-Amz-Copy-Source': '/%s/%s?nonsense' % (
        self.bucket, obj)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
    self.assertEqual(status, 400)
    self.assertEqual(get_error_code(body), 'InvalidArgument')

    # Valid versionId plus an unknown parameter -> still InvalidArgument.
    headers = {'X-Amz-Copy-Source': '/%s/%s?versionId=null&nonsense' % (
        self.bucket, obj)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
    self.assertEqual(status, 400)
    self.assertEqual(get_error_code(body), 'InvalidArgument')

    # versionId=null alone is accepted; copy succeeds.
    headers = {'X-Amz-Copy-Source': '/%s/%s?versionId=null' % (
        self.bucket, obj)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    # The copy carries over both user metadata and the body.
    status, headers, body = \
        self.conn.make_request('GET', dst_bucket, dst_obj)
    self.assertEqual(status, 200)
    self.assertEqual(headers['x-amz-meta-test'], 'src')
    self.assertEqual(body, src_body)
||||
def test_put_object_copy_source(self):
    """Server-side copy via X-Amz-Copy-Source.

    Covers cross-bucket copy, same-bucket copy, and copy onto itself
    (which requires a metadata change to be legal).  In each case the
    destination must end up with the source's etag.
    """
    obj = 'object'
    content = 'abcdefghij'
    etag = md5(content).hexdigest()
    self.conn.make_request('PUT', self.bucket, obj, body=content)

    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_object'
    self.conn.make_request('PUT', dst_bucket)

    # /src/src -> /dst/dst
    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    self._assertObjectEtag(dst_bucket, dst_obj, etag)

    # /src/src -> /src/dst
    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj)}
    status, headers, body = \
        self.conn.make_request('PUT', self.bucket, dst_obj, headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    self._assertObjectEtag(self.bucket, dst_obj, etag)

    # /src/src -> /src/src
    # need changes to copy itself (e.g. metadata); REPLACE with new
    # metadata makes the self-copy legal.
    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj),
               'X-Amz-Meta-Foo': 'bar',
               'X-Amz-Metadata-Directive': 'REPLACE'}
    status, headers, body = \
        self.conn.make_request('PUT', self.bucket, obj, headers)
    self.assertEqual(status, 200)
    self._assertObjectEtag(self.bucket, obj, etag)
    self.assertCommonResponseHeaders(headers)
||||
def test_put_object_copy_metadata_directive(self):
    """With ``X-Amz-Metadata-Directive: REPLACE`` the copy takes its
    metadata from the request, not from the source object."""
    src_obj = 'object'
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_object'
    self.conn.make_request('PUT', self.bucket, src_obj,
                           headers={'X-Amz-Meta-Test': 'src'})
    self.conn.make_request('PUT', dst_bucket)

    copy_headers = {
        'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, src_obj),
        'X-Amz-Metadata-Directive': 'REPLACE',
        'X-Amz-Meta-Test': 'dst',
    }
    status, resp_headers, body = self.conn.make_request(
        'PUT', dst_bucket, dst_obj, copy_headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)

    # The destination carries the replacement value, not the source's.
    status, resp_headers, body = self.conn.make_request(
        'HEAD', dst_bucket, dst_obj)
    self.assertEqual(resp_headers['x-amz-meta-test'], 'dst')
||||
def test_put_object_copy_source_if_modified_since(self):
    """Copy succeeds when the source was modified after the
    X-Amz-Copy-Source-If-Modified-Since date (one day in the past)."""
    obj = 'object'
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_object'
    # Source is a zero-byte object, so its etag is md5 of empty input.
    etag = md5().hexdigest()
    self.conn.make_request('PUT', self.bucket, obj)
    self.conn.make_request('PUT', dst_bucket)

    _, headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    src_datetime = mktime(parsedate(headers['last-modified']))
    # A day before the actual Last-Modified -> condition holds.
    src_datetime = src_datetime - DAY
    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj),
               'X-Amz-Copy-Source-If-Modified-Since':
               formatdate(src_datetime)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers=headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    # NOTE(review): this checks the *source* object's etag; asserting
    # the destination (dst_bucket/dst_obj) would seem the intent —
    # confirm before changing.
    self._assertObjectEtag(self.bucket, obj, etag)
||||
def test_put_object_copy_source_if_unmodified_since(self):
    """Copy succeeds when the source was NOT modified after the
    X-Amz-Copy-Source-If-Unmodified-Since date (one day in the future)."""
    obj = 'object'
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_object'
    # Source is a zero-byte object, so its etag is md5 of empty input.
    etag = md5().hexdigest()
    self.conn.make_request('PUT', self.bucket, obj)
    self.conn.make_request('PUT', dst_bucket)

    _, headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    src_datetime = mktime(parsedate(headers['last-modified']))
    # A day after the actual Last-Modified -> condition holds.
    src_datetime = src_datetime + DAY
    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj),
               'X-Amz-Copy-Source-If-Unmodified-Since':
               formatdate(src_datetime)}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers=headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    # NOTE(review): asserts the *source* object's etag, not the
    # destination's — confirm whether dst_bucket/dst_obj was intended.
    self._assertObjectEtag(self.bucket, obj, etag)
||||
def test_put_object_copy_source_if_match(self):
    """Copy succeeds when X-Amz-Copy-Source-If-Match equals the
    source's etag (md5 of the empty body here)."""
    obj = 'object'
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_object'
    # Zero-byte source object -> its etag is md5 of empty input.
    etag = md5().hexdigest()
    self.conn.make_request('PUT', self.bucket, obj)
    self.conn.make_request('PUT', dst_bucket)

    # NOTE(review): this HEAD's results are never used — the locally
    # computed empty-body etag above is what goes into If-Match.
    status, headers, body = \
        self.conn.make_request('HEAD', self.bucket, obj)

    headers = {'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, obj),
               'X-Amz-Copy-Source-If-Match': etag}
    status, headers, body = \
        self.conn.make_request('PUT', dst_bucket, dst_obj, headers=headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(headers)
    # NOTE(review): asserts the *source* object's etag, not the
    # destination's — confirm whether dst_bucket/dst_obj was intended.
    self._assertObjectEtag(self.bucket, obj, etag)
||||
def test_put_object_copy_source_if_none_match(self):
    """Copy succeeds when X-Amz-Copy-Source-If-None-Match does not
    match the source's etag."""
    src_obj = 'object'
    dst_bucket = 'dst-bucket'
    dst_obj = 'dst_object'
    empty_etag = md5().hexdigest()
    self.conn.make_request('PUT', self.bucket, src_obj)
    self.conn.make_request('PUT', dst_bucket)

    copy_headers = {
        'X-Amz-Copy-Source': '/%s/%s' % (self.bucket, src_obj),
        'X-Amz-Copy-Source-If-None-Match': 'none-match',
    }
    status, resp_headers, body = self.conn.make_request(
        'PUT', dst_bucket, dst_obj, headers=copy_headers)
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
    self._assertObjectEtag(self.bucket, src_obj, empty_etag)
||||
def test_get_object_response_content_type(self):
    """``response-content-type`` query param overrides Content-Type."""
    obj = 'obj'
    self.conn.make_request('PUT', self.bucket, obj)

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj,
        query='response-content-type=text/plain')
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
    self.assertEqual(resp_headers['content-type'], 'text/plain')
||||
def test_get_object_response_content_language(self):
    """``response-content-language`` overrides Content-Language."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj, query='response-content-language=en')
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
    self.assertEqual(resp_headers['content-language'], 'en')
||||
def test_get_object_response_cache_control(self):
    """``response-cache-control`` overrides Cache-Control."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj, query='response-cache-control=private')
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
    self.assertEqual(resp_headers['cache-control'], 'private')
||||
def test_get_object_response_content_disposition(self):
    """``response-content-disposition`` overrides Content-Disposition."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj,
        query='response-content-disposition=inline')
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
    self.assertEqual(resp_headers['content-disposition'], 'inline')
||||
def test_get_object_response_content_encoding(self):
    """``response-content-encoding`` overrides Content-Encoding."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj, query='response-content-encoding=gzip')
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
    self.assertEqual(resp_headers['content-encoding'], 'gzip')
||||
def test_get_object_range(self):
    """Range GETs: single ranges return 206 with the sliced body and
    the object's metadata; a multi-range request returns a
    multipart/byteranges body that is parsed and checked part by part.
    """
    obj = 'object'
    content = 'abcdefghij'
    headers = {'x-amz-meta-test': 'swift'}
    self.conn.make_request(
        'PUT', self.bucket, obj, headers=headers, body=content)

    # bytes=1-5 -> five bytes starting at offset 1.
    headers = {'Range': 'bytes=1-5'}
    status, headers, body = \
        self.conn.make_request('GET', self.bucket, obj, headers=headers)
    self.assertEqual(status, 206)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], '5')
    self.assertTrue('x-amz-meta-test' in headers)
    self.assertEqual('swift', headers['x-amz-meta-test'])
    self.assertEqual(body, 'bcdef')

    # bytes=5- -> everything from offset 5 to the end.
    headers = {'Range': 'bytes=5-'}
    status, headers, body = \
        self.conn.make_request('GET', self.bucket, obj, headers=headers)
    self.assertEqual(status, 206)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], '5')
    self.assertTrue('x-amz-meta-test' in headers)
    self.assertEqual('swift', headers['x-amz-meta-test'])
    self.assertEqual(body, 'fghij')

    # bytes=-5 -> the last five bytes (suffix range).
    headers = {'Range': 'bytes=-5'}
    status, headers, body = \
        self.conn.make_request('GET', self.bucket, obj, headers=headers)
    self.assertEqual(status, 206)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-length' in headers)
    self.assertEqual(headers['content-length'], '5')
    self.assertTrue('x-amz-meta-test' in headers)
    self.assertEqual('swift', headers['x-amz-meta-test'])
    self.assertEqual(body, 'fghij')

    # Multiple ranges in one request -> multipart/byteranges response.
    ranges = ['1-2', '4-5']

    headers = {'Range': 'bytes=%s' % ','.join(ranges)}
    status, headers, body = \
        self.conn.make_request('GET', self.bucket, obj, headers=headers)
    self.assertEqual(status, 206)
    self.assertCommonResponseHeaders(headers)
    self.assertTrue('content-length' in headers)

    self.assertTrue('content-type' in headers)  # sanity
    content_type, boundary = headers['content-type'].split(';')

    self.assertEqual('multipart/byteranges', content_type)
    self.assertTrue(boundary.startswith('boundary='))  # sanity
    boundary_str = boundary[len('boundary='):]

    # Re-parse the multipart body with the stdlib MIME parser by
    # synthesizing a Content-Type header carrying the boundary.
    # TODO: Using swift.common.utils.multipart_byteranges_to_document_iters
    # could be easy enough.
    parser = email.parser.FeedParser()
    parser.feed(
        "Content-Type: multipart/byterange; boundary=%s\r\n\r\n" %
        boundary_str)
    parser.feed(body)
    message = parser.close()

    self.assertTrue(message.is_multipart())  # sanity check
    mime_parts = message.get_payload()
    self.assertEqual(len(mime_parts), len(ranges))  # sanity

    # Each MIME part must carry the matching Content-Range and the
    # corresponding slice of the original body.
    for index, range_value in enumerate(ranges):
        start, end = map(int, range_value.split('-'))
        # go to next section and check sanity
        self.assertTrue(mime_parts[index])

        part = mime_parts[index]
        self.assertEqual(
            'application/octet-stream', part.get_content_type())
        expected_range = 'bytes %s/%s' % (range_value, len(content))
        self.assertEqual(
            expected_range, part.get('Content-Range'))
        # rest
        payload = part.get_payload().strip()
        self.assertEqual(content[start:end + 1], payload)
||||
def test_get_object_if_modified_since(self):
    """GET with If-Modified-Since one day before the object's
    Last-Modified returns the full object (200)."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    _, resp_headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    one_day_ago = mktime(parsedate(resp_headers['last-modified'])) - DAY

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj,
        headers={'If-Modified-Since': formatdate(one_day_ago)})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
def test_get_object_if_unmodified_since(self):
    """GET with If-Unmodified-Since one day after the object's
    Last-Modified returns 200."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    _, resp_headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    one_day_later = mktime(parsedate(resp_headers['last-modified'])) + DAY

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj,
        headers={'If-Unmodified-Since': formatdate(one_day_later)})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
def test_get_object_if_match(self):
    """GET conditioned on the object's own ETag returns 200."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    _, resp_headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    current_etag = resp_headers['etag']

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj, headers={'If-Match': current_etag})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
def test_get_object_if_none_match(self):
    """GET with a non-matching If-None-Match value returns 200."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    status, resp_headers, body = self.conn.make_request(
        'GET', self.bucket, obj,
        headers={'If-None-Match': 'none-match'})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
def test_head_object_range(self):
    """HEAD with a Range header reports the length of the selected
    range in Content-Length."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj, body='abcdefghij')

    # Each of these range specs selects exactly five of the ten bytes.
    for range_spec in ('bytes=1-5', 'bytes=5-', 'bytes=-5'):
        status, resp_headers, body = self.conn.make_request(
            'HEAD', self.bucket, obj, headers={'Range': range_spec})
        self.assertEqual(resp_headers['content-length'], '5')
        self.assertCommonResponseHeaders(resp_headers)
||||
def test_head_object_if_modified_since(self):
    """HEAD with If-Modified-Since one day in the past returns 200."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    _, resp_headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    one_day_ago = mktime(parsedate(resp_headers['last-modified'])) - DAY

    status, resp_headers, body = self.conn.make_request(
        'HEAD', self.bucket, obj,
        headers={'If-Modified-Since': formatdate(one_day_ago)})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
def test_head_object_if_unmodified_since(self):
    """HEAD with If-Unmodified-Since one day in the future returns 200."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    _, resp_headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    one_day_later = mktime(parsedate(resp_headers['last-modified'])) + DAY

    status, resp_headers, body = self.conn.make_request(
        'HEAD', self.bucket, obj,
        headers={'If-Unmodified-Since': formatdate(one_day_later)})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
def test_head_object_if_match(self):
    """HEAD conditioned on the object's own ETag returns 200."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    _, resp_headers, _ = self.conn.make_request('HEAD', self.bucket, obj)
    current_etag = resp_headers['etag']

    status, resp_headers, body = self.conn.make_request(
        'HEAD', self.bucket, obj, headers={'If-Match': current_etag})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
def test_head_object_if_none_match(self):
    """HEAD with a non-matching If-None-Match value returns 200."""
    obj = 'object'
    self.conn.make_request('PUT', self.bucket, obj)

    status, resp_headers, body = self.conn.make_request(
        'HEAD', self.bucket, obj,
        headers={'If-None-Match': 'none-match'})
    self.assertEqual(status, 200)
    self.assertCommonResponseHeaders(resp_headers)
||||
|
||||
class TestS3ApiObjectSigV4(TestS3ApiObject):
    """Re-run all the object tests with AWS Signature Version 4.

    The S3_USE_SIGV4 environment variable selects the signing scheme
    for the test connection.  A few tests are skipped on boto versions
    below 3.0, whose SigV4 signing is broken for these requests.
    """

    @classmethod
    def setUpClass(cls):
        # Switch the whole class to SigV4 request signing.
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        super(TestS3ApiObjectSigV4, self).setUp()

    @unittest2.skipIf(StrictVersion(boto.__version__) < StrictVersion('3.0'),
                      'This stuff got the signing issue of boto<=2.x')
    def test_put_object_metadata(self):
        super(TestS3ApiObjectSigV4, self).test_put_object_metadata()

    @unittest2.skipIf(StrictVersion(boto.__version__) < StrictVersion('3.0'),
                      'This stuff got the signing issue of boto<=2.x')
    def test_put_object_copy_source_if_modified_since(self):
        super(TestS3ApiObjectSigV4, self).\
            test_put_object_copy_source_if_modified_since()

    @unittest2.skipIf(StrictVersion(boto.__version__) < StrictVersion('3.0'),
                      'This stuff got the signing issue of boto<=2.x')
    def test_put_object_copy_source_if_unmodified_since(self):
        super(TestS3ApiObjectSigV4, self).\
            test_put_object_copy_source_if_unmodified_since()

    @unittest2.skipIf(StrictVersion(boto.__version__) < StrictVersion('3.0'),
                      'This stuff got the signing issue of boto<=2.x')
    def test_put_object_copy_source_if_match(self):
        super(TestS3ApiObjectSigV4,
              self).test_put_object_copy_source_if_match()

    @unittest2.skipIf(StrictVersion(boto.__version__) < StrictVersion('3.0'),
                      'This stuff got the signing issue of boto<=2.x')
    def test_put_object_copy_source_if_none_match(self):
        super(TestS3ApiObjectSigV4,
              self).test_put_object_copy_source_if_none_match()
||||
if __name__ == '__main__':
|
||||
unittest2.main()
|
237
test/functional/s3api/test_presigned.py
Normal file
237
test/functional/s3api/test_presigned.py
Normal file
@ -0,0 +1,237 @@
|
||||
# Copyright (c) 2016 SwiftStack, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
import requests
|
||||
|
||||
from swift.common.middleware.s3api.etree import fromstring
|
||||
|
||||
import test.functional as tf
|
||||
|
||||
from test.functional.s3api import S3ApiBase
|
||||
from test.functional.s3api.utils import get_error_code, get_error_msg
|
||||
|
||||
|
||||
def setUpModule():
|
||||
tf.setup_package()
|
||||
|
||||
|
||||
def tearDownModule():
|
||||
tf.teardown_package()
|
||||
|
||||
|
||||
class TestS3ApiPresignedUrls(S3ApiBase):
|
||||
def test_bucket(self):
    """Bucket-level presigned URLs: list an empty bucket, list it with
    objects, then delete it via a presigned DELETE.

    Bug fix: the StorageClass check previously read
    ``assertIsNotNone(o.find('StorageClass').text is not None)``, which
    asserts a *boolean* is not None and therefore always passed; it now
    asserts the element text itself.
    """
    bucket = 'test-bucket'
    req_objects = ('object', 'object2')
    max_bucket_listing = tf.cluster_info['s3api'].get(
        'max_bucket_listing', 1000)

    # GET Bucket (Without Object)
    status, _junk, _junk = self.conn.make_request('PUT', bucket)
    self.assertEqual(status, 200)

    url, headers = self.conn.generate_url_and_headers('GET', bucket)
    resp = requests.get(url, headers=headers)
    self.assertEqual(resp.status_code, 200,
                     'Got %d %s' % (resp.status_code, resp.content))
    self.assertCommonResponseHeaders(resp.headers)
    self.assertIsNotNone(resp.headers['content-type'])
    self.assertEqual(resp.headers['content-length'],
                     str(len(resp.content)))

    # Empty bucket: no Contents elements in the listing.
    elem = fromstring(resp.content, 'ListBucketResult')
    self.assertEqual(elem.find('Name').text, bucket)
    self.assertIsNone(elem.find('Prefix').text)
    self.assertIsNone(elem.find('Marker').text)
    self.assertEqual(elem.find('MaxKeys').text,
                     str(max_bucket_listing))
    self.assertEqual(elem.find('IsTruncated').text, 'false')
    objects = elem.findall('./Contents')
    self.assertEqual(list(objects), [])

    # GET Bucket (With Object)
    for obj in req_objects:
        status, _junk, _junk = self.conn.make_request('PUT', bucket, obj)
        self.assertEqual(
            status, 200,
            'Got %d response while creating %s' % (status, obj))

    # The same presigned URL remains valid for the second listing.
    resp = requests.get(url, headers=headers)
    self.assertEqual(resp.status_code, 200,
                     'Got %d %s' % (resp.status_code, resp.content))
    self.assertCommonResponseHeaders(resp.headers)
    self.assertIsNotNone(resp.headers['content-type'])
    self.assertEqual(resp.headers['content-length'],
                     str(len(resp.content)))

    elem = fromstring(resp.content, 'ListBucketResult')
    self.assertEqual(elem.find('Name').text, bucket)
    self.assertIsNone(elem.find('Prefix').text)
    self.assertIsNone(elem.find('Marker').text)
    self.assertEqual(elem.find('MaxKeys').text,
                     str(max_bucket_listing))
    self.assertEqual(elem.find('IsTruncated').text, 'false')
    resp_objects = elem.findall('./Contents')
    self.assertEqual(len(list(resp_objects)), 2)
    for o in resp_objects:
        self.assertIn(o.find('Key').text, req_objects)
        self.assertIsNotNone(o.find('LastModified').text)
        self.assertRegexpMatches(
            o.find('LastModified').text,
            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
        self.assertIsNotNone(o.find('ETag').text)
        self.assertEqual(o.find('Size').text, '0')
        # Previously asserted "<bool> is not None", which could never
        # fail; check the element text directly instead.
        self.assertIsNotNone(o.find('StorageClass').text)
        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(o.find('Owner/DisplayName').text,
                         self.conn.user_id)
    # DELETE Bucket
    for obj in req_objects:
        self.conn.make_request('DELETE', bucket, obj)
    url, headers = self.conn.generate_url_and_headers('DELETE', bucket)
    resp = requests.delete(url, headers=headers)
    self.assertEqual(resp.status_code, 204,
                     'Got %d %s' % (resp.status_code, resp.content))
|
||||
def test_expiration_limits(self):
    """Run the expiration-limit checks for whichever signature scheme
    (SigV2 or SigV4) the environment selects."""
    use_sigv4 = bool(os.environ.get('S3_USE_SIGV4'))
    check = (self._test_expiration_limits_v4 if use_sigv4
             else self._test_expiration_limits_v2)
    check()
||||
def _test_expiration_limits_v2(self):
    """SigV2 presigned URLs: an expiration timestamp beyond 32 bits is
    rejected with AccessDenied and a descriptive message."""
    bucket = 'test-bucket'

    # Expiration date is too far in the future
    url, headers = self.conn.generate_url_and_headers(
        'GET', bucket, expires_in=2 ** 32)
    resp = requests.get(url, headers=headers)
    self.assertEqual(resp.status_code, 403,
                     'Got %d %s' % (resp.status_code, resp.content))
    self.assertEqual(get_error_code(resp.content), 'AccessDenied')
    self.assertIn('Invalid date (should be seconds since epoch)',
                  get_error_msg(resp.content))
|
||||
def _test_expiration_limits_v4(self):
    """SigV4 presigned URLs: X-Amz-Expires must be a non-negative
    number no greater than 7 days (604800 seconds).

    Bug fix: the final loop iterated over ``(2 ** 63, 'foo')`` but
    always passed ``expires_in=2 ** 63``, so the non-numeric case was
    never actually exercised; it now passes ``exp``.
    """
    bucket = 'test-bucket'

    # Expiration is negative
    url, headers = self.conn.generate_url_and_headers(
        'GET', bucket, expires_in=-1)
    resp = requests.get(url, headers=headers)
    self.assertEqual(resp.status_code, 400,
                     'Got %d %s' % (resp.status_code, resp.content))
    self.assertEqual(get_error_code(resp.content),
                     'AuthorizationQueryParametersError')
    self.assertIn('X-Amz-Expires must be non-negative',
                  get_error_msg(resp.content))

    # Expiration date is too far in the future
    for exp in (7 * 24 * 60 * 60 + 1,
                2 ** 63 - 1):
        url, headers = self.conn.generate_url_and_headers(
            'GET', bucket, expires_in=exp)
        resp = requests.get(url, headers=headers)
        self.assertEqual(resp.status_code, 400,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertEqual(get_error_code(resp.content),
                         'AuthorizationQueryParametersError')
        self.assertIn('X-Amz-Expires must be less than 604800 seconds',
                      get_error_msg(resp.content))

    # Expiration date is *way* too far in the future, or isn't a number
    for exp in (2 ** 63, 'foo'):
        # Fixed: use the loop variable, not a hard-coded 2 ** 63.
        url, headers = self.conn.generate_url_and_headers(
            'GET', bucket, expires_in=exp)
        resp = requests.get(url, headers=headers)
        self.assertEqual(resp.status_code, 400,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertEqual(get_error_code(resp.content),
                         'AuthorizationQueryParametersError')
        self.assertEqual('X-Amz-Expires should be a number',
                         get_error_msg(resp.content))
|
||||
def test_object(self):
    """Object-level presigned URLs: HEAD a missing object, verify the
    signature binds the verb, then PUT/GET/overwrite/DELETE through
    presigned URLs.
    """
    bucket = 'test-bucket'
    obj = 'object'

    status, _junk, _junk = self.conn.make_request('PUT', bucket)
    self.assertEqual(status, 200)

    # HEAD/missing object
    head_url, headers = self.conn.generate_url_and_headers(
        'HEAD', bucket, obj)
    resp = requests.head(head_url, headers=headers)
    self.assertEqual(resp.status_code, 404,
                     'Got %d %s' % (resp.status_code, resp.content))

    # Wrong verb: a GET against the HEAD-signed URL must fail the
    # signature check.
    resp = requests.get(head_url)
    self.assertEqual(resp.status_code, 403,
                     'Got %d %s' % (resp.status_code, resp.content))
    self.assertEqual(get_error_code(resp.content),
                     'SignatureDoesNotMatch')

    # PUT empty object
    put_url, headers = self.conn.generate_url_and_headers(
        'PUT', bucket, obj)
    resp = requests.put(put_url, data='', headers=headers)
    self.assertEqual(resp.status_code, 200,
                     'Got %d %s' % (resp.status_code, resp.content))
    # GET empty object
    get_url, headers = self.conn.generate_url_and_headers(
        'GET', bucket, obj)
    resp = requests.get(get_url, headers=headers)
    self.assertEqual(resp.status_code, 200,
                     'Got %d %s' % (resp.status_code, resp.content))
    self.assertEqual(resp.content, '')

    # PUT over object: the same presigned PUT URL can overwrite.
    resp = requests.put(put_url, data='foobar', headers=headers)
    self.assertEqual(resp.status_code, 200,
                     'Got %d %s' % (resp.status_code, resp.content))

    # GET non-empty object
    resp = requests.get(get_url, headers=headers)
    self.assertEqual(resp.status_code, 200,
                     'Got %d %s' % (resp.status_code, resp.content))
    self.assertEqual(resp.content, 'foobar')

    # DELETE Object
    delete_url, headers = self.conn.generate_url_and_headers(
        'DELETE', bucket, obj)
    resp = requests.delete(delete_url, headers=headers)
    self.assertEqual(resp.status_code, 204,
                     'Got %d %s' % (resp.status_code, resp.content))

    # Final cleanup
    status, _junk, _junk = self.conn.make_request('DELETE', bucket)
    self.assertEqual(status, 204)
|
||||
|
||||
class TestS3ApiPresignedUrlsSigV4(TestS3ApiPresignedUrls):
    """Repeat the presigned-URL tests with AWS Signature Version 4."""

    @classmethod
    def setUpClass(cls):
        # Picked up by the test connection when generating URLs.
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        super(TestS3ApiPresignedUrlsSigV4, self).setUp()
|
100
test/functional/s3api/test_service.py
Normal file
100
test/functional/s3api/test_service.py
Normal file
@ -0,0 +1,100 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest2
|
||||
import os
|
||||
|
||||
import test.functional as tf
|
||||
|
||||
from swift.common.middleware.s3api.etree import fromstring
|
||||
|
||||
from test.functional.s3api import S3ApiBase
|
||||
from test.functional.s3api.s3_test_client import Connection
|
||||
from test.functional.s3api.utils import get_error_code
|
||||
|
||||
|
||||
def setUpModule():
    """Initialise the functional-test package before this module's tests."""
    tf.setup_package()
|
||||
|
||||
|
||||
def tearDownModule():
    """Tear the functional-test package back down after the tests run."""
    tf.teardown_package()
|
||||
|
||||
|
||||
class TestS3ApiService(S3ApiBase):
    """Functional tests for the S3 GET Service (list buckets) API."""

    def setUp(self):
        super(TestS3ApiService, self).setUp()

    def test_service(self):
        """GET Service with no buckets, then with two buckets created."""
        # GET Service(without bucket)
        status, headers, body = self.conn.make_request('GET')
        self.assertEqual(status, 200)

        self.assertCommonResponseHeaders(headers)
        self.assertTrue(headers['content-type'] is not None)
        # TODO: requires consideration
        # self.assertEqual(headers['transfer-encoding'], 'chunked')

        # With no buckets the listing element must be present but empty,
        # and the Owner must reflect the requesting user.
        elem = fromstring(body, 'ListAllMyBucketsResult')
        buckets = elem.findall('./Buckets/Bucket')
        self.assertEqual(list(buckets), [])
        owner = elem.find('Owner')
        self.assertEqual(self.conn.user_id, owner.find('ID').text)
        self.assertEqual(self.conn.user_id, owner.find('DisplayName').text)

        # GET Service(with Bucket)
        req_buckets = ('bucket', 'bucket2')
        for bucket in req_buckets:
            self.conn.make_request('PUT', bucket)
        status, headers, body = self.conn.make_request('GET')
        self.assertEqual(status, 200)

        # Both created buckets must show up, each with a CreationDate.
        elem = fromstring(body, 'ListAllMyBucketsResult')
        resp_buckets = elem.findall('./Buckets/Bucket')
        self.assertEqual(len(list(resp_buckets)), 2)
        for b in resp_buckets:
            self.assertTrue(b.find('Name').text in req_buckets)
            self.assertTrue(b.find('CreationDate') is not None)

    def test_service_error_signature_not_match(self):
        """A wrong secret key must yield SignatureDoesNotMatch as XML."""
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = auth_error_conn.make_request('GET')
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        self.assertEqual(headers['content-type'], 'application/xml')

    def test_service_error_no_date_header(self):
        """A request with no usable date information must be rejected."""
        # Without x-amz-date/Date header, that makes 403 forbidden
        status, headers, body = self.conn.make_request(
            'GET', headers={'Date': '', 'x-amz-date': ''})
        self.assertEqual(status, 403)
        self.assertEqual(get_error_code(body), 'AccessDenied')
        self.assertIn('AWS authentication requires a valid Date '
                      'or x-amz-date header', body)
|
||||
|
||||
|
||||
class TestS3ApiServiceSigV4(TestS3ApiService):
    """Re-run the GET Service functional tests using Signature V4."""

    @classmethod
    def setUpClass(cls):
        # Flip the test client into SigV4 mode for every test in the class.
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        # Restore SigV2 behaviour for any test classes that run afterwards.
        os.environ.pop('S3_USE_SIGV4')

    def setUp(self):
        super(TestS3ApiServiceSigV4, self).setUp()
|
||||
|
||||
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest2.main()
|
31
test/functional/s3api/utils.py
Normal file
31
test/functional/s3api/utils.py
Normal file
@ -0,0 +1,31 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from hashlib import md5
|
||||
from swift.common.middleware.s3api.etree import fromstring
|
||||
|
||||
|
||||
def get_error_code(body):
    """Return the value of the <Code> element from an S3 XML error body."""
    return fromstring(body, 'Error').find('Code').text
|
||||
|
||||
|
||||
def get_error_msg(body):
    """Return the value of the <Message> element from an S3 XML error body."""
    return fromstring(body, 'Error').find('Message').text
|
||||
|
||||
|
||||
def calculate_md5(body):
    """Return the base64-encoded MD5 digest of *body*.

    This is the value S3 expects in ``Content-MD5`` request headers.
    """
    # base64.b64encode works on both Python 2 and 3; the original
    # str.encode('base64') codec was removed in Python 3.  For a 16-byte
    # MD5 digest the two produce byte-identical output (no line wrapping
    # is involved), so behaviour on Python 2 is unchanged.
    import base64
    return base64.b64encode(md5(body).digest()).strip()
|
@ -17,6 +17,8 @@ auth_prefix = /auth/
|
||||
account = test
|
||||
username = tester
|
||||
password = testing
|
||||
s3_access_key = test:tester
|
||||
s3_secret_key = testing
|
||||
|
||||
# User on a second account (needs admin access to the account)
|
||||
account2 = test2
|
||||
@ -26,6 +28,9 @@ password2 = testing2
|
||||
# User on same account as first, but without admin access
|
||||
username3 = tester3
|
||||
password3 = testing3
|
||||
# s3api requires these to be on the same account as the primary user,
# but with a different username
|
||||
s3_access_key2 = test:tester3
|
||||
s3_secret_key2 = testing3
|
||||
|
||||
# Fourth user is required for keystone v3 specific tests.
|
||||
# Account must be in a non-default domain.
|
||||
|
163
test/unit/common/middleware/s3api/__init__.py
Normal file
163
test/unit/common/middleware/s3api/__init__.py
Normal file
@ -0,0 +1,163 @@
|
||||
# Copyright (c) 2011-2014 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
from datetime import datetime
|
||||
import email
|
||||
import time
|
||||
|
||||
from swift.common import swob
|
||||
|
||||
from swift.common.middleware.s3api.s3api import S3ApiMiddleware
|
||||
from helpers import FakeSwift
|
||||
from swift.common.middleware.s3api.etree import fromstring
|
||||
from swift.common.middleware.s3api.utils import Config
|
||||
|
||||
|
||||
class FakeApp(object):
    """Minimal WSGI app standing in for a Swift proxy behind s3api.

    Wraps a :class:`FakeSwift` and emulates the PATH_INFO rewriting that
    a real Swift auth middleware performs for S3 requests.
    """

    def __init__(self):
        self.swift = FakeSwift()

    def _update_s3_path_info(self, env):
        """
        For S3 requests, Swift auth middleware replaces a user name in
        env['PATH_INFO'] with a valid tenant id.
        E.g. '/v1/test:tester/bucket/object' will become
        '/v1/AUTH_test/bucket/object'. This method emulates the behavior.
        """
        _, authorization = env['HTTP_AUTHORIZATION'].split(' ')
        # Only the tenant is needed; the user name and signature are
        # parsed out but deliberately unused (underscore-prefixed).
        tenant_user, _sign = authorization.rsplit(':', 1)
        tenant, _user = tenant_user.rsplit(':', 1)

        path = env['PATH_INFO']
        env['PATH_INFO'] = path.replace(tenant_user, 'AUTH_' + tenant)

    def __call__(self, env, start_response):
        # Rewrite the path only for authenticated S3 requests, then hand
        # the request off to the fake Swift backend.
        if 'HTTP_AUTHORIZATION' in env:
            self._update_s3_path_info(env)

        return self.swift(env, start_response)
|
||||
|
||||
|
||||
class S3ApiTestCase(unittest.TestCase):
|
||||
def __init__(self, name):
|
||||
unittest.TestCase.__init__(self, name)
|
||||
|
||||
def setUp(self):
|
||||
# setup default config
|
||||
self.conf = Config({
|
||||
'allow_no_owner': False,
|
||||
'location': 'US',
|
||||
'dns_compliant_bucket_names': True,
|
||||
'max_bucket_listing': 1000,
|
||||
'max_parts_listing': 1000,
|
||||
'max_multi_delete_objects': 1000,
|
||||
's3_acl': False,
|
||||
'storage_domain': 'localhost',
|
||||
'auth_pipeline_check': True,
|
||||
'max_upload_part_num': 1000,
|
||||
'check_bucket_owner': False,
|
||||
'force_swift_request_proxy_log': False,
|
||||
'allow_multipart_uploads': True,
|
||||
'min_segment_size': 5242880,
|
||||
})
|
||||
# those 2 settings has existed the original test setup
|
||||
self.conf.log_level = 'debug'
|
||||
|
||||
self.app = FakeApp()
|
||||
self.swift = self.app.swift
|
||||
self.s3api = S3ApiMiddleware(self.app, self.conf)
|
||||
|
||||
self.swift.register('HEAD', '/v1/AUTH_test',
|
||||
swob.HTTPOk, {}, None)
|
||||
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
|
||||
swob.HTTPNoContent, {}, None)
|
||||
self.swift.register('PUT', '/v1/AUTH_test/bucket',
|
||||
swob.HTTPCreated, {}, None)
|
||||
self.swift.register('POST', '/v1/AUTH_test/bucket',
|
||||
swob.HTTPNoContent, {}, None)
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket',
|
||||
swob.HTTPNoContent, {}, None)
|
||||
|
||||
self.swift.register('GET', '/v1/AUTH_test/bucket/object',
|
||||
swob.HTTPOk, {}, "")
|
||||
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
|
||||
swob.HTTPCreated, {}, None)
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
|
||||
swob.HTTPNoContent, {}, None)
|
||||
|
||||
def _get_error_code(self, body):
|
||||
elem = fromstring(body, 'Error')
|
||||
return elem.find('./Code').text
|
||||
|
||||
def _get_error_message(self, body):
|
||||
elem = fromstring(body, 'Error')
|
||||
return elem.find('./Message').text
|
||||
|
||||
def _test_method_error(self, method, path, response_class, headers={}):
|
||||
if not path.startswith('/'):
|
||||
path = '/' + path # add a missing slash before the path
|
||||
|
||||
uri = '/v1/AUTH_test'
|
||||
if path != '/':
|
||||
uri += path
|
||||
|
||||
self.swift.register(method, uri, response_class, headers, None)
|
||||
headers.update({'Authorization': 'AWS test:tester:hmac',
|
||||
'Date': self.get_date_header()})
|
||||
req = swob.Request.blank(path, environ={'REQUEST_METHOD': method},
|
||||
headers=headers)
|
||||
status, headers, body = self.call_s3api(req)
|
||||
return self._get_error_code(body)
|
||||
|
||||
def get_date_header(self):
|
||||
# email.utils.formatdate returns utc timestamp in default
|
||||
return email.utils.formatdate(time.time())
|
||||
|
||||
def get_v4_amz_date_header(self):
|
||||
return datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
|
||||
|
||||
def call_app(self, req, app=None, expect_exception=False):
|
||||
if app is None:
|
||||
app = self.app
|
||||
|
||||
req.headers.setdefault("User-Agent", "Mozzarella Foxfire")
|
||||
|
||||
status = [None]
|
||||
headers = [None]
|
||||
|
||||
def start_response(s, h, ei=None):
|
||||
status[0] = s
|
||||
headers[0] = swob.HeaderKeyDict(h)
|
||||
|
||||
body_iter = app(req.environ, start_response)
|
||||
body = ''
|
||||
caught_exc = None
|
||||
try:
|
||||
for chunk in body_iter:
|
||||
body += chunk
|
||||
except Exception as exc:
|
||||
if expect_exception:
|
||||
caught_exc = exc
|
||||
else:
|
||||
raise
|
||||
|
||||
if expect_exception:
|
||||
return status[0], headers[0], body, caught_exc
|
||||
else:
|
||||
return status[0], headers[0], body
|
||||
|
||||
def call_s3api(self, req, **kwargs):
|
||||
return self.call_app(req, app=self.s3api, **kwargs)
|
18
test/unit/common/middleware/s3api/exceptions.py
Normal file
18
test/unit/common/middleware/s3api/exceptions.py
Normal file
@ -0,0 +1,18 @@
|
||||
# Copyright (c) 2013 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class NotMethodException(Exception):
    """Marker exception used by the s3api unit tests (no behaviour of its
    own)."""
|
185
test/unit/common/middleware/s3api/helpers.py
Normal file
185
test/unit/common/middleware/s3api/helpers.py
Normal file
@ -0,0 +1,185 @@
|
||||
# Copyright (c) 2013 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This stuff can't live in test/unit/__init__.py due to its swob dependency.
|
||||
|
||||
from copy import deepcopy
|
||||
from hashlib import md5
|
||||
from swift.common import swob
|
||||
from swift.common.utils import split_path
|
||||
from swift.common.request_helpers import is_sys_meta
|
||||
|
||||
|
||||
class FakeSwift(object):
    """
    A good-enough fake Swift proxy server to use in testing middleware.

    Responses are pre-registered per (method, path) via register(); each
    incoming request is recorded and answered from that table, with a few
    fallbacks (query-string-less match, HEAD served from GET, GET served
    from an earlier fake PUT).
    """

    def __init__(self, s3_acl=False):
        # list of (method, path, headers) tuples, in request order
        self._calls = []
        self.req_method_paths = []
        # swift.source value observed for each request
        self.swift_sources = []
        # path -> (headers, body) captured from simulated object PUTs
        self.uploaded = {}
        # mapping of (method, path) --> (response class, headers, body)
        self._responses = {}
        # when True, emulate the auth-middleware behaviour needed by s3acl
        self.s3_acl = s3_acl

    def _fake_auth_middleware(self, env):
        """Emulate Swift auth: rewrite the tenant in PATH_INFO and install
        a swift.authorize callback."""
        if 'swift.authorize_override' in env:
            return

        if 'HTTP_AUTHORIZATION' not in env:
            return

        # 'AWS tenant:user:signature' -> tenant
        _, authorization = env['HTTP_AUTHORIZATION'].split(' ')
        tenant_user, sign = authorization.rsplit(':', 1)
        tenant, user = tenant_user.rsplit(':', 1)

        path = env['PATH_INFO']
        env['PATH_INFO'] = path.replace(tenant_user, 'AUTH_' + tenant)

        env['REMOTE_USER'] = 'authorized'

        if env['REQUEST_METHOD'] == 'TEST':
            # AccessDenied by default at s3acl authenticate
            env['swift.authorize'] = \
                lambda req: swob.HTTPForbidden(request=req)
        else:
            env['swift.authorize'] = lambda req: None

    def __call__(self, env, start_response):
        """WSGI entry point: record the call and reply from the registered
        response table (or one of its fallbacks)."""
        if self.s3_acl:
            self._fake_auth_middleware(env)

        req = swob.Request(env)
        method = env['REQUEST_METHOD']
        path = env['PATH_INFO']
        _, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
                                       rest_with_last=True)
        if env.get('QUERY_STRING'):
            path += '?' + env['QUERY_STRING']

        # honour any authorize callback installed above (or by a test)
        if 'swift.authorize' in env:
            resp = env['swift.authorize'](req)
            if resp:
                return resp(env, start_response)

        headers = req.headers
        self._calls.append((method, path, headers))
        self.swift_sources.append(env.get('swift.source'))

        try:
            # exact (method, path-with-query) match first
            resp_class, raw_headers, body = self._responses[(method, path)]
            headers = swob.HeaderKeyDict(raw_headers)
        except KeyError:
            # FIXME: suppress print state error for python3 compatibility.
            # pylint: disable-msg=E1601
            # Fallbacks, tried in order: registration without the query
            # string; HEAD served from a registered GET (body dropped);
            # GET of an object captured by an earlier simulated PUT.
            if (env.get('QUERY_STRING')
                    and (method, env['PATH_INFO']) in self._responses):
                resp_class, raw_headers, body = self._responses[
                    (method, env['PATH_INFO'])]
                headers = swob.HeaderKeyDict(raw_headers)
            elif method == 'HEAD' and ('GET', path) in self._responses:
                resp_class, raw_headers, _ = self._responses[('GET', path)]
                body = None
                headers = swob.HeaderKeyDict(raw_headers)
            elif method == 'GET' and obj and path in self.uploaded:
                resp_class = swob.HTTPOk
                headers, body = self.uploaded[path]
            else:
                print("Didn't find %r in allowed responses" %
                      ((method, path),))
                raise

        # simulate object PUT
        if method == 'PUT' and obj:
            # NOTE: 'input' shadows the builtin; kept as-is here.
            input = env['wsgi.input'].read()
            etag = md5(input).hexdigest()
            headers.setdefault('Etag', etag)
            headers.setdefault('Content-Length', len(input))

            # keep it for subsequent GET requests later
            self.uploaded[path] = (deepcopy(headers), input)
            if "CONTENT_TYPE" in env:
                self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"]

        # range requests ought to work, but copies are special
        support_range_and_conditional = not (
            method == 'PUT' and
            'X-Copy-From' in req.headers and
            'Range' in req.headers)
        resp = resp_class(req=req, headers=headers, body=body,
                          conditional_response=support_range_and_conditional)
        return resp(env, start_response)

    @property
    def calls(self):
        # (method, path) pairs only, headers dropped
        return [(method, path) for method, path, headers in self._calls]

    @property
    def calls_with_headers(self):
        # full (method, path, headers) records
        return self._calls

    @property
    def call_count(self):
        return len(self._calls)

    def register(self, method, path, response_class, headers, body):
        """Register a canned response; preserves any previously registered
        sysmeta headers for the same (method, path)."""
        # assuming the path format like /v1/account/container/object
        resource_map = ['account', 'container', 'object']
        acos = filter(None, split_path(path, 0, 4, True)[1:])
        index = len(acos) - 1
        resource = resource_map[index]
        if (method, path) in self._responses:
            old_headers = self._responses[(method, path)][1]
            headers = headers.copy()
            for key, value in old_headers.iteritems():
                if is_sys_meta(resource, key) and key not in headers:
                    # keep old sysmeta for s3acl
                    headers.update({key: value})

        self._responses[(method, path)] = (response_class, headers, body)

    def register_unconditionally(self, method, path, response_class, headers,
                                 body):
        # register() keeps old sysmeta around, but
        # register_unconditionally() keeps nothing.
        self._responses[(method, path)] = (response_class, headers, body)

    def clear_calls(self):
        """Forget all recorded requests (registered responses are kept)."""
        del self._calls[:]
|
||||
|
||||
|
||||
class UnreadableInput(object):
    """Stand-in for ``wsgi.input`` whose ``read`` always raises TypeError.

    Some clients send neither a Content-Length nor a Transfer-Encoding
    header, which can cause (some versions of?) eventlet to bomb out on
    reads (see https://bugs.launchpad.net/swift3/+bug/1593870).  Used as
    a context manager, it checks on exit that the middleware under test
    never tried to read the request body.
    """

    def __init__(self, test_case):
        # count of read() attempts observed so far
        self.calls = 0
        self.test_case = test_case

    def read(self, *args, **kwargs):
        self.calls += 1
        # Emulate eventlet raising TypeError on such a read.
        raise TypeError

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        # the body must never have been read
        self.test_case.assertEqual(0, self.calls)
|
230
test/unit/common/middleware/s3api/test_acl.py
Normal file
230
test/unit/common/middleware/s3api/test_acl.py
Normal file
@ -0,0 +1,230 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
import mock
|
||||
|
||||
from cStringIO import StringIO
|
||||
from hashlib import md5
|
||||
|
||||
from swift.common.swob import Request, HTTPAccepted
|
||||
from swift.common.middleware.s3api.etree import fromstring, tostring, \
|
||||
Element, SubElement, XMLNS_XSI
|
||||
from swift.common.middleware.s3api.s3response import InvalidArgument
|
||||
from swift.common.middleware.s3api.acl_utils import handle_acl_header
|
||||
|
||||
from test.unit.common.middleware.s3api import S3ApiTestCase
|
||||
from test.unit.common.middleware.s3api.helpers import UnreadableInput
|
||||
from test.unit.common.middleware.s3api.test_s3_acl import s3acl
|
||||
|
||||
|
||||
class TestS3ApiAcl(S3ApiTestCase):
    """Unit tests for the s3api bucket/object ACL sub-resource."""

    def setUp(self):
        super(TestS3ApiAcl, self).setUp()
        # All ACL APIs should be called against an existing bucket.
        self.swift.register('PUT', '/v1/AUTH_test/bucket',
                            HTTPAccepted, {}, None)

    def _check_acl(self, owner, body):
        """Assert *body* is an ACL giving FULL_CONTROL to *owner*."""
        elem = fromstring(body, 'AccessControlPolicy')
        permission = elem.find('./AccessControlList/Grant/Permission').text
        self.assertEqual(permission, 'FULL_CONTROL')
        name = elem.find('./AccessControlList/Grant/Grantee/ID').text
        self.assertEqual(name, owner)

    def test_bucket_acl_GET(self):
        req = Request.blank('/bucket?acl',
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self._check_acl('test:tester', body)

    def test_bucket_acl_PUT(self):
        # Build a minimal public-read ACL document.
        elem = Element('AccessControlPolicy')
        owner = SubElement(elem, 'Owner')
        SubElement(owner, 'ID').text = 'id'
        acl = SubElement(elem, 'AccessControlList')
        grant = SubElement(acl, 'Grant')
        grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
        grantee.set('{%s}type' % XMLNS_XSI, 'Group')
        SubElement(grantee, 'URI').text = \
            'http://acs.amazonaws.com/groups/global/AllUsers'
        SubElement(grant, 'Permission').text = 'READ'

        xml = tostring(elem)
        req = Request.blank('/bucket?acl',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()},
                            body=xml)
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

        # Same PUT delivered chunked, i.e. without a Content-Length.
        req = Request.blank('/bucket?acl',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'wsgi.input': StringIO(xml)},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header(),
                                     'Transfer-Encoding': 'chunked'})
        self.assertIsNone(req.content_length)
        self.assertIsNone(req.message_length())
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

    def test_bucket_canned_acl_PUT(self):
        req = Request.blank('/bucket?acl',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header(),
                                     'X-AMZ-ACL': 'public-read'})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

    @s3acl(s3acl_only=True)
    def test_bucket_canned_acl_PUT_with_s3acl(self):
        # With s3_acl enabled the legacy handle_acl_header path must not
        # be taken at all.
        req = Request.blank('/bucket?acl',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header(),
                                     'X-AMZ-ACL': 'public-read'})
        with mock.patch('swift.common.middleware.s3api.s3request.'
                        'handle_acl_header') as mock_handler:
            status, headers, body = self.call_s3api(req)
            self.assertEqual(status.split()[0], '200')
            self.assertEqual(mock_handler.call_count, 0)

    def test_bucket_fails_with_both_acl_header_and_xml_PUT(self):
        # Supplying a canned ACL header AND an ACL body is an error.
        elem = Element('AccessControlPolicy')
        owner = SubElement(elem, 'Owner')
        SubElement(owner, 'ID').text = 'id'
        acl = SubElement(elem, 'AccessControlList')
        grant = SubElement(acl, 'Grant')
        grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
        grantee.set('{%s}type' % XMLNS_XSI, 'Group')
        SubElement(grantee, 'URI').text = \
            'http://acs.amazonaws.com/groups/global/AllUsers'
        SubElement(grant, 'Permission').text = 'READ'

        xml = tostring(elem)
        req = Request.blank('/bucket?acl',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header(),
                                     'X-AMZ-ACL': 'public-read'},
                            body=xml)
        status, headers, body = self.call_s3api(req)
        self.assertEqual(self._get_error_code(body),
                         'UnexpectedContent')

    def _test_put_no_body(self, use_content_length=False,
                          use_transfer_encoding=False, string_to_md5=''):
        """PUT ?acl with an empty body must fail without reading the input."""
        content_md5 = md5(string_to_md5).digest().encode('base64').strip()
        with UnreadableInput(self) as fake_input:
            req = Request.blank(
                '/bucket?acl',
                environ={
                    'REQUEST_METHOD': 'PUT',
                    'wsgi.input': fake_input},
                headers={
                    'Authorization': 'AWS test:tester:hmac',
                    'Date': self.get_date_header(),
                    'Content-MD5': content_md5},
                body='')
            if not use_content_length:
                req.environ.pop('CONTENT_LENGTH')
            if use_transfer_encoding:
                req.environ['HTTP_TRANSFER_ENCODING'] = 'chunked'
            status, headers, body = self.call_s3api(req)
            self.assertEqual(status, '400 Bad Request')
            self.assertEqual(self._get_error_code(body),
                             'MissingSecurityHeader')
            self.assertEqual(self._get_error_message(body),
                             'Your request was missing a required header.')
            self.assertIn('<MissingHeaderName>x-amz-acl</MissingHeaderName>',
                          body)

    @s3acl
    def test_bucket_fails_with_neither_acl_header_nor_xml_PUT(self):
        self._test_put_no_body()
        self._test_put_no_body(string_to_md5='test')
        self._test_put_no_body(use_content_length=True)
        self._test_put_no_body(use_content_length=True, string_to_md5='test')
        self._test_put_no_body(use_transfer_encoding=True)
        self._test_put_no_body(use_transfer_encoding=True, string_to_md5='zz')

    def test_object_acl_GET(self):
        req = Request.blank('/bucket/object?acl',
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self._check_acl('test:tester', body)

    def test_invalid_xml(self):
        req = Request.blank('/bucket?acl',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()},
                            body='invalid')
        status, headers, body = self.call_s3api(req)
        self.assertEqual(self._get_error_code(body), 'MalformedACLError')

    def test_handle_acl_header(self):
        """Canned ACL headers must map onto Swift container ACL headers."""
        def check_generated_acl_header(acl, targets):
            req = Request.blank('/bucket',
                                headers={'X-Amz-Acl': acl})
            handle_acl_header(req)
            for target in targets:
                self.assertTrue(target[0] in req.headers)
                self.assertEqual(req.headers[target[0]], target[1])

        check_generated_acl_header('public-read',
                                   [('X-Container-Read', '.r:*,.rlistings')])
        check_generated_acl_header('public-read-write',
                                   [('X-Container-Read', '.r:*,.rlistings'),
                                    ('X-Container-Write', '.r:*')])
        check_generated_acl_header('private',
                                   [('X-Container-Read', '.'),
                                    ('X-Container-Write', '.')])

    @s3acl(s3acl_only=True)
    def test_handle_acl_header_with_s3acl(self):
        """With s3_acl enabled the container ACL headers are NOT generated."""
        def check_generated_acl_header(acl, targets):
            req = Request.blank('/bucket',
                                headers={'X-Amz-Acl': acl})
            for target in targets:
                self.assertTrue(target not in req.headers)
            self.assertTrue('HTTP_X_AMZ_ACL' in req.environ)
            # TODO: add translation and assertion for s3acl

        check_generated_acl_header('public-read',
                                   ['X-Container-Read'])
        check_generated_acl_header('public-read-write',
                                   ['X-Container-Read', 'X-Container-Write'])
        check_generated_acl_header('private',
                                   ['X-Container-Read', 'X-Container-Write'])

    def test_handle_acl_with_invalid_header_string(self):
        req = Request.blank('/bucket', headers={'X-Amz-Acl': 'invalid'})
        with self.assertRaises(InvalidArgument) as cm:
            handle_acl_header(req)
        self.assertTrue('argument_name' in cm.exception.info)
        self.assertEqual(cm.exception.info['argument_name'], 'x-amz-acl')
        self.assertTrue('argument_value' in cm.exception.info)
        self.assertEqual(cm.exception.info['argument_value'], 'invalid')
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
42
test/unit/common/middleware/s3api/test_acl_handlers.py
Normal file
42
test/unit/common/middleware/s3api/test_acl_handlers.py
Normal file
@ -0,0 +1,42 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from swift.common.middleware.s3api.acl_handlers import S3AclHandler, \
|
||||
BucketAclHandler, ObjectAclHandler, BaseAclHandler, PartAclHandler, \
|
||||
UploadAclHandler, UploadsAclHandler, get_acl_handler
|
||||
|
||||
|
||||
class TestAclHandlers(unittest.TestCase):
    """Sanity checks for the ACL handler lookup in acl_handlers."""

    def test_get_acl_handler(self):
        # each resource name must resolve to (a subclass of) its handler;
        # unknown names fall back to BaseAclHandler
        cases = {'Bucket': BucketAclHandler,
                 'Object': ObjectAclHandler,
                 'S3Acl': S3AclHandler,
                 'Part': PartAclHandler,
                 'Upload': UploadAclHandler,
                 'Uploads': UploadsAclHandler,
                 'Foo': BaseAclHandler}
        for name, handler_class in cases.items():
            self.assertTrue(issubclass(get_acl_handler(name), handler_class))

    def test_handle_acl(self):
        # handle_acl behaviour is already exercised by the s3_acl checks
        # in test_s3_acl.py, so there is nothing further to do here.
        pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
49
test/unit/common/middleware/s3api/test_acl_utils.py
Normal file
49
test/unit/common/middleware/s3api/test_acl_utils.py
Normal file
@ -0,0 +1,49 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from swift.common.swob import Request
|
||||
from swift.common.middleware.s3api.acl_utils import handle_acl_header
|
||||
|
||||
from test.unit.common.middleware.s3api import S3ApiTestCase
|
||||
|
||||
|
||||
class TestS3ApiAclUtils(S3ApiTestCase):
    """Unit tests for acl_utils.handle_acl_header."""

    def setUp(self):
        super(TestS3ApiAclUtils, self).setUp()

    def test_handle_acl_header(self):
        def assert_acl_translation(canned_acl, expected_headers):
            # a canned X-Amz-Acl must be translated into the matching
            # Swift container ACL request headers
            req = Request.blank('/bucket',
                                headers={'X-Amz-Acl': canned_acl})
            handle_acl_header(req)
            for name, value in expected_headers:
                self.assertIn(name, req.headers)
                self.assertEqual(req.headers[name], value)

        assert_acl_translation('public-read',
                               [('X-Container-Read', '.r:*,.rlistings')])
        assert_acl_translation('public-read-write',
                               [('X-Container-Read', '.r:*,.rlistings'),
                                ('X-Container-Write', '.r:*')])
        assert_acl_translation('private',
                               [('X-Container-Read', '.'),
                                ('X-Container-Write', '.')])
|
||||
|
||||
|
||||
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
|
755
test/unit/common/middleware/s3api/test_bucket.py
Normal file
755
test/unit/common/middleware/s3api/test_bucket.py
Normal file
@ -0,0 +1,755 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
import cgi
|
||||
|
||||
from swift.common import swob
|
||||
from swift.common.swob import Request
|
||||
from swift.common.utils import json
|
||||
|
||||
from swift.common.middleware.s3api.etree import fromstring, tostring, \
|
||||
Element, SubElement
|
||||
from swift.common.middleware.s3api.subresource import Owner, encode_acl, \
|
||||
ACLPublicRead
|
||||
from swift.common.middleware.s3api.s3request import MAX_32BIT_INT
|
||||
|
||||
from test.unit.common.middleware.s3api import S3ApiTestCase
|
||||
from test.unit.common.middleware.s3api.test_s3_acl import s3acl
|
||||
from test.unit.common.middleware.s3api.helpers import UnreadableInput
|
||||
|
||||
|
||||
class TestS3ApiBucket(S3ApiTestCase):
|
||||
def setup_objects(self):
|
||||
self.objects = (('rose', '2011-01-05T02:19:14.275290', 0, 303),
|
||||
('viola', '2011-01-05T02:19:14.275290', '0', 3909),
|
||||
('lily', '2011-01-05T02:19:14.275290', '0', '3909'),
|
||||
('with space', '2011-01-05T02:19:14.275290', 0, 390),
|
||||
('with%20space', '2011-01-05T02:19:14.275290', 0, 390))
|
||||
|
||||
objects = map(
|
||||
lambda item: {'name': str(item[0]), 'last_modified': str(item[1]),
|
||||
'hash': str(item[2]), 'bytes': str(item[3])},
|
||||
list(self.objects))
|
||||
object_list = json.dumps(objects)
|
||||
|
||||
self.prefixes = ['rose', 'viola', 'lily']
|
||||
object_list_subdir = []
|
||||
for p in self.prefixes:
|
||||
object_list_subdir.append({"subdir": p})
|
||||
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket+segments',
|
||||
swob.HTTPNoContent, {}, json.dumps([]))
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket+segments/rose',
|
||||
swob.HTTPNoContent, {}, json.dumps([]))
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket+segments/viola',
|
||||
swob.HTTPNoContent, {}, json.dumps([]))
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket+segments/lily',
|
||||
swob.HTTPNoContent, {}, json.dumps([]))
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket+segments/with'
|
||||
' space', swob.HTTPNoContent, {}, json.dumps([]))
|
||||
self.swift.register('DELETE', '/v1/AUTH_test/bucket+segments/with%20'
|
||||
'space', swob.HTTPNoContent, {}, json.dumps([]))
|
||||
self.swift.register('GET', '/v1/AUTH_test/bucket+segments?format=json'
|
||||
'&marker=with%2520space', swob.HTTPOk, {},
|
||||
json.dumps([]))
|
||||
self.swift.register('GET', '/v1/AUTH_test/bucket+segments?format=json'
|
||||
'&marker=', swob.HTTPOk, {}, object_list)
|
||||
self.swift.register('HEAD', '/v1/AUTH_test/junk', swob.HTTPNoContent,
|
||||
{}, None)
|
||||
self.swift.register('HEAD', '/v1/AUTH_test/nojunk', swob.HTTPNotFound,
|
||||
{}, None)
|
||||
self.swift.register('GET', '/v1/AUTH_test/junk', swob.HTTPOk, {},
|
||||
object_list)
|
||||
self.swift.register(
|
||||
'GET',
|
||||
'/v1/AUTH_test/junk?delimiter=a&format=json&limit=3&marker=viola',
|
||||
swob.HTTPOk, {}, json.dumps(objects[2:]))
|
||||
self.swift.register('GET', '/v1/AUTH_test/junk-subdir', swob.HTTPOk,
|
||||
{}, json.dumps(object_list_subdir))
|
||||
self.swift.register(
|
||||
'GET',
|
||||
'/v1/AUTH_test/subdirs?delimiter=/&format=json&limit=3',
|
||||
swob.HTTPOk, {}, json.dumps([
|
||||
{'subdir': 'nothing/'},
|
||||
{'subdir': 'but/'},
|
||||
{'subdir': 'subdirs/'},
|
||||
]))
|
||||
|
||||
    def setUp(self):
        """Set up the base S3Api fixture and register canned responses."""
        super(TestS3ApiBucket, self).setUp()
        self.setup_objects()
|
||||
|
||||
    def test_bucket_HEAD(self):
        """HEAD on an existing bucket returns 200."""
        req = Request.blank('/junk',
                            environ={'REQUEST_METHOD': 'HEAD'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

    def test_bucket_HEAD_error(self):
        """HEAD on a missing bucket returns 404 with an empty body."""
        req = Request.blank('/nojunk',
                            environ={'REQUEST_METHOD': 'HEAD'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '404')
        self.assertEqual(body, '')  # sanity

    def test_bucket_HEAD_slash(self):
        """A trailing slash on the bucket path is still accepted."""
        req = Request.blank('/junk/',
                            environ={'REQUEST_METHOD': 'HEAD'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

    def test_bucket_HEAD_slash_error(self):
        """Trailing slash on a missing bucket still returns 404."""
        req = Request.blank('/nojunk/',
                            environ={'REQUEST_METHOD': 'HEAD'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '404')
|
||||
|
||||
    @s3acl
    def test_bucket_GET_error(self):
        """Backend errors on bucket GET map to the matching S3 error codes."""
        code = self._test_method_error('GET', '/bucket', swob.HTTPUnauthorized)
        self.assertEqual(code, 'SignatureDoesNotMatch')
        code = self._test_method_error('GET', '/bucket', swob.HTTPForbidden)
        self.assertEqual(code, 'AccessDenied')
        code = self._test_method_error('GET', '/bucket', swob.HTTPNotFound)
        self.assertEqual(code, 'NoSuchBucket')
        code = self._test_method_error('GET', '/bucket', swob.HTTPServerError)
        self.assertEqual(code, 'InternalError')
|
||||
|
||||
def test_bucket_GET(self):
|
||||
bucket_name = 'junk'
|
||||
req = Request.blank('/%s' % bucket_name,
|
||||
environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'Authorization': 'AWS test:tester:hmac',
|
||||
'Date': self.get_date_header()})
|
||||
status, headers, body = self.call_s3api(req)
|
||||
self.assertEqual(status.split()[0], '200')
|
||||
|
||||
elem = fromstring(body, 'ListBucketResult')
|
||||
name = elem.find('./Name').text
|
||||
self.assertEqual(name, bucket_name)
|
||||
|
||||
objects = elem.iterchildren('Contents')
|
||||
|
||||
names = []
|
||||
for o in objects:
|
||||
names.append(o.find('./Key').text)
|
||||
self.assertEqual('2011-01-05T02:19:14.275Z',
|
||||
o.find('./LastModified').text)
|
||||
self.assertEqual('"0"', o.find('./ETag').text)
|
||||
|
||||
self.assertEqual(len(names), len(self.objects))
|
||||
for i in self.objects:
|
||||
self.assertTrue(i[0] in names)
|
||||
|
||||
def test_bucket_GET_subdir(self):
|
||||
bucket_name = 'junk-subdir'
|
||||
req = Request.blank('/%s' % bucket_name,
|
||||
environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'Authorization': 'AWS test:tester:hmac',
|
||||
'Date': self.get_date_header()})
|
||||
status, headers, body = self.call_s3api(req)
|
||||
self.assertEqual(status.split()[0], '200')
|
||||
elem = fromstring(body, 'ListBucketResult')
|
||||
name = elem.find('./Name').text
|
||||
self.assertEqual(name, bucket_name)
|
||||
|
||||
prefixes = elem.findall('CommonPrefixes')
|
||||
|
||||
self.assertEqual(len(prefixes), len(self.prefixes))
|
||||
for p in prefixes:
|
||||
self.assertTrue(p.find('./Prefix').text in self.prefixes)
|
||||
|
||||
    def test_bucket_GET_is_truncated(self):
        """IsTruncated reflects whether max-keys cut the listing short."""
        bucket_name = 'junk'

        # 5 objects exist; max-keys=5 returns everything.
        req = Request.blank('/%s?max-keys=5' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./IsTruncated').text, 'false')

        # max-keys=4 leaves one object unlisted.
        req = Request.blank('/%s?max-keys=4' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')

        # Truncated subdir listings expose the next marker.
        req = Request.blank('/subdirs?delimiter=/&max-keys=2',
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')
        self.assertEqual(elem.find('./NextMarker').text, 'but/')
|
||||
|
||||
    def test_bucket_GET_v2_is_truncated(self):
        """ListObjectsV2 reports KeyCount and a continuation token when
        truncated."""
        bucket_name = 'junk'

        req = Request.blank('/%s?list-type=2&max-keys=5' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./KeyCount').text, '5')
        self.assertEqual(elem.find('./IsTruncated').text, 'false')

        req = Request.blank('/%s?list-type=2&max-keys=4' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertIsNotNone(elem.find('./NextContinuationToken'))
        self.assertEqual(elem.find('./KeyCount').text, '4')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')

        req = Request.blank('/subdirs?list-type=2&delimiter=/&max-keys=2',
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertIsNotNone(elem.find('./NextContinuationToken'))
        self.assertEqual(elem.find('./KeyCount').text, '2')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')
|
||||
|
||||
    def test_bucket_GET_max_keys(self):
        """max-keys is echoed back and the backend limit is max-keys + 1
        (capped at the container listing limit + 1)."""
        bucket_name = 'junk'

        req = Request.blank('/%s?max-keys=5' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./MaxKeys').text, '5')
        _, path = self.swift.calls[-1]
        _, query_string = path.split('?')
        args = dict(cgi.parse_qsl(query_string))
        # One extra entry is requested to detect truncation.
        self.assertEqual(args['limit'], '6')

        req = Request.blank('/%s?max-keys=5000' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./MaxKeys').text, '5000')
        _, path = self.swift.calls[-1]
        _, query_string = path.split('?')
        args = dict(cgi.parse_qsl(query_string))
        # Backend requests are capped at 1000 + 1.
        self.assertEqual(args['limit'], '1001')
|
||||
|
||||
    def test_bucket_GET_str_max_keys(self):
        """A non-numeric max-keys is rejected as InvalidArgument."""
        bucket_name = 'junk'

        req = Request.blank('/%s?max-keys=invalid' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(self._get_error_code(body), 'InvalidArgument')

    def test_bucket_GET_negative_max_keys(self):
        """A negative max-keys is rejected as InvalidArgument."""
        bucket_name = 'junk'

        req = Request.blank('/%s?max-keys=-1' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(self._get_error_code(body), 'InvalidArgument')

    def test_bucket_GET_over_32bit_int_max_keys(self):
        """max-keys beyond the 32-bit int range is rejected."""
        bucket_name = 'junk'

        req = Request.blank('/%s?max-keys=%s' %
                            (bucket_name, MAX_32BIT_INT + 1),
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(self._get_error_code(body), 'InvalidArgument')
|
||||
|
||||
    def test_bucket_GET_passthroughs(self):
        """delimiter/marker/prefix are echoed in the response and passed
        through to the Swift backend query string."""
        bucket_name = 'junk'
        req = Request.blank('/%s?delimiter=a&marker=b&prefix=c' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./Prefix').text, 'c')
        self.assertEqual(elem.find('./Marker').text, 'b')
        self.assertEqual(elem.find('./Delimiter').text, 'a')
        _, path = self.swift.calls[-1]
        _, query_string = path.split('?')
        args = dict(cgi.parse_qsl(query_string))
        self.assertEqual(args['delimiter'], 'a')
        self.assertEqual(args['marker'], 'b')
        self.assertEqual(args['prefix'], 'c')

    def test_bucket_GET_v2_passthroughs(self):
        """Same as above for ListObjectsV2, where start-after becomes the
        backend marker."""
        bucket_name = 'junk'
        req = Request.blank(
            '/%s?list-type=2&delimiter=a&start-after=b&prefix=c' % bucket_name,
            environ={'REQUEST_METHOD': 'GET'},
            headers={'Authorization': 'AWS test:tester:hmac',
                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./Prefix').text, 'c')
        self.assertEqual(elem.find('./StartAfter').text, 'b')
        self.assertEqual(elem.find('./Delimiter').text, 'a')
        _, path = self.swift.calls[-1]
        _, query_string = path.split('?')
        args = dict(cgi.parse_qsl(query_string))
        self.assertEqual(args['delimiter'], 'a')
        # "start-after" is converted to "marker"
        self.assertEqual(args['marker'], 'b')
        self.assertEqual(args['prefix'], 'c')
|
||||
|
||||
    def test_bucket_GET_with_nonascii_queries(self):
        """UTF-8 delimiter/marker/prefix values survive the round trip."""
        bucket_name = 'junk'
        req = Request.blank(
            '/%s?delimiter=\xef\xbc\xa1&marker=\xef\xbc\xa2&'
            'prefix=\xef\xbc\xa3' % bucket_name,
            environ={'REQUEST_METHOD': 'GET'},
            headers={'Authorization': 'AWS test:tester:hmac',
                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./Prefix').text, '\xef\xbc\xa3')
        self.assertEqual(elem.find('./Marker').text, '\xef\xbc\xa2')
        self.assertEqual(elem.find('./Delimiter').text, '\xef\xbc\xa1')
        _, path = self.swift.calls[-1]
        _, query_string = path.split('?')
        args = dict(cgi.parse_qsl(query_string))
        self.assertEqual(args['delimiter'], '\xef\xbc\xa1')
        self.assertEqual(args['marker'], '\xef\xbc\xa2')
        self.assertEqual(args['prefix'], '\xef\xbc\xa3')

    def test_bucket_GET_v2_with_nonascii_queries(self):
        """Same as above for ListObjectsV2 (start-after -> marker)."""
        bucket_name = 'junk'
        req = Request.blank(
            '/%s?list-type=2&delimiter=\xef\xbc\xa1&start-after=\xef\xbc\xa2&'
            'prefix=\xef\xbc\xa3' % bucket_name,
            environ={'REQUEST_METHOD': 'GET'},
            headers={'Authorization': 'AWS test:tester:hmac',
                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./Prefix').text, '\xef\xbc\xa3')
        self.assertEqual(elem.find('./StartAfter').text, '\xef\xbc\xa2')
        self.assertEqual(elem.find('./Delimiter').text, '\xef\xbc\xa1')
        _, path = self.swift.calls[-1]
        _, query_string = path.split('?')
        args = dict(cgi.parse_qsl(query_string))
        self.assertEqual(args['delimiter'], '\xef\xbc\xa1')
        self.assertEqual(args['marker'], '\xef\xbc\xa2')
        self.assertEqual(args['prefix'], '\xef\xbc\xa3')
|
||||
|
||||
    def test_bucket_GET_with_delimiter_max_keys(self):
        """Truncated delimiter listings report NextMarker."""
        bucket_name = 'junk'
        req = Request.blank('/%s?delimiter=a&max-keys=2' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./NextMarker').text, 'viola')
        self.assertEqual(elem.find('./MaxKeys').text, '2')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')

    def test_bucket_GET_v2_with_delimiter_max_keys(self):
        """V2 truncated listings can be resumed via continuation-token."""
        bucket_name = 'junk'
        req = Request.blank(
            '/%s?list-type=2&delimiter=a&max-keys=2' % bucket_name,
            environ={'REQUEST_METHOD': 'GET'},
            headers={'Authorization': 'AWS test:tester:hmac',
                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')
        elem = fromstring(body, 'ListBucketResult')
        next_token = elem.find('./NextContinuationToken')
        self.assertIsNotNone(next_token)
        self.assertEqual(elem.find('./MaxKeys').text, '2')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')

        # Resume the listing from the returned token; the next page should
        # start with the third object.
        req = Request.blank(
            '/%s?list-type=2&delimiter=a&max-keys=2&continuation-token=%s' %
            (bucket_name, next_token.text),
            environ={'REQUEST_METHOD': 'GET'},
            headers={'Authorization': 'AWS test:tester:hmac',
                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')
        elem = fromstring(body, 'ListBucketResult')
        names = [o.find('./Key').text for o in elem.iterchildren('Contents')]
        self.assertEqual(names[0], 'lily')

    def test_bucket_GET_subdir_with_delimiter_max_keys(self):
        """Truncated subdir listings report the subdir as NextMarker."""
        bucket_name = 'junk-subdir'
        req = Request.blank('/%s?delimiter=a&max-keys=1' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./NextMarker').text, 'rose')
        self.assertEqual(elem.find('./MaxKeys').text, '1')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')
|
||||
|
||||
    def test_bucket_GET_v2_fetch_owner(self):
        """V2 listings omit Owner unless fetch-owner=true is supplied."""
        bucket_name = 'junk'
        req = Request.blank('/%s?list-type=2' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

        elem = fromstring(body, 'ListBucketResult')
        name = elem.find('./Name').text
        self.assertEqual(name, bucket_name)

        objects = elem.iterchildren('Contents')
        for o in objects:
            self.assertIsNone(o.find('./Owner'))

        req = Request.blank('/%s?list-type=2&fetch-owner=true' % bucket_name,
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

        elem = fromstring(body, 'ListBucketResult')
        name = elem.find('./Name').text
        self.assertEqual(name, bucket_name)

        objects = elem.iterchildren('Contents')
        for o in objects:
            self.assertIsNotNone(o.find('./Owner'))
|
||||
|
||||
@s3acl
|
||||
def test_bucket_PUT_error(self):
|
||||
code = self._test_method_error('PUT', '/bucket', swob.HTTPCreated,
|
||||
headers={'Content-Length': 'a'})
|
||||
self.assertEqual(code, 'InvalidArgument')
|
||||
code = self._test_method_error('PUT', '/bucket', swob.HTTPCreated,
|
||||
headers={'Content-Length': '-1'})
|
||||
self.assertEqual(code, 'InvalidArgument')
|
||||
code = self._test_method_error('PUT', '/bucket', swob.HTTPUnauthorized)
|
||||
self.assertEqual(code, 'SignatureDoesNotMatch')
|
||||
code = self._test_method_error('PUT', '/bucket', swob.HTTPForbidden)
|
||||
self.assertEqual(code, 'AccessDenied')
|
||||
code = self._test_method_error('PUT', '/bucket', swob.HTTPAccepted)
|
||||
self.assertEqual(code, 'BucketAlreadyExists')
|
||||
code = self._test_method_error('PUT', '/bucket', swob.HTTPServerError)
|
||||
self.assertEqual(code, 'InternalError')
|
||||
code = self._test_method_error(
|
||||
'PUT', '/bucket+bucket', swob.HTTPCreated)
|
||||
self.assertEqual(code, 'InvalidBucketName')
|
||||
code = self._test_method_error(
|
||||
'PUT', '/192.168.11.1', swob.HTTPCreated)
|
||||
self.assertEqual(code, 'InvalidBucketName')
|
||||
code = self._test_method_error(
|
||||
'PUT', '/bucket.-bucket', swob.HTTPCreated)
|
||||
self.assertEqual(code, 'InvalidBucketName')
|
||||
code = self._test_method_error(
|
||||
'PUT', '/bucket-.bucket', swob.HTTPCreated)
|
||||
self.assertEqual(code, 'InvalidBucketName')
|
||||
code = self._test_method_error('PUT', '/bucket*', swob.HTTPCreated)
|
||||
self.assertEqual(code, 'InvalidBucketName')
|
||||
code = self._test_method_error('PUT', '/b', swob.HTTPCreated)
|
||||
self.assertEqual(code, 'InvalidBucketName')
|
||||
code = self._test_method_error(
|
||||
'PUT', '/%s' % ''.join(['b' for x in xrange(64)]),
|
||||
swob.HTTPCreated)
|
||||
self.assertEqual(code, 'InvalidBucketName')
|
||||
|
||||
    @s3acl
    def test_bucket_PUT(self):
        """Successful bucket creation returns 200 with a Location header,
        including with chunked transfer-encoding or unreadable input."""
        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(body, '')
        self.assertEqual(status.split()[0], '200')
        self.assertEqual(headers['Location'], '/bucket')

        # Apparently some clients will include a chunked transfer-encoding
        # even with no body
        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header(),
                                     'Transfer-Encoding': 'chunked'})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(body, '')
        self.assertEqual(status.split()[0], '200')
        self.assertEqual(headers['Location'], '/bucket')

        # The body must never be read on a bodiless bucket PUT.
        with UnreadableInput(self) as fake_input:
            req = Request.blank(
                '/bucket',
                environ={'REQUEST_METHOD': 'PUT',
                         'wsgi.input': fake_input},
                headers={'Authorization': 'AWS test:tester:hmac',
                         'Date': self.get_date_header()})
            status, headers, body = self.call_s3api(req)
        self.assertEqual(body, '')
        self.assertEqual(status.split()[0], '200')
        self.assertEqual(headers['Location'], '/bucket')
|
||||
|
||||
    def _test_bucket_PUT_with_location(self, root_element):
        """PUT a bucket with a LocationConstraint of 'US' under the given
        XML root element and expect success."""
        elem = Element(root_element)
        SubElement(elem, 'LocationConstraint').text = 'US'
        xml = tostring(elem)

        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()},
                            body=xml)
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')

    @s3acl
    def test_bucket_PUT_with_location(self):
        self._test_bucket_PUT_with_location('CreateBucketConfiguration')

    @s3acl
    def test_bucket_PUT_with_ami_location(self):
        # ec2-ami-tools apparently uses CreateBucketConstraint instead?
        self._test_bucket_PUT_with_location('CreateBucketConstraint')

    @s3acl
    def test_bucket_PUT_with_strange_location(self):
        # Even crazier: it doesn't seem to matter
        self._test_bucket_PUT_with_location('foo')
|
||||
|
||||
def test_bucket_PUT_with_canned_acl(self):
|
||||
req = Request.blank('/bucket',
|
||||
environ={'REQUEST_METHOD': 'PUT'},
|
||||
headers={'Authorization': 'AWS test:tester:hmac',
|
||||
'Date': self.get_date_header(),
|
||||
'X-Amz-Acl': 'public-read'})
|
||||
status, headers, body = self.call_s3api(req)
|
||||
self.assertEqual(status.split()[0], '200')
|
||||
_, _, headers = self.swift.calls_with_headers[-1]
|
||||
self.assertTrue('X-Container-Read' in headers)
|
||||
self.assertEqual(headers.get('X-Container-Read'), '.r:*,.rlistings')
|
||||
self.assertNotIn('X-Container-Sysmeta-S3api-Acl', headers)
|
||||
|
||||
    @s3acl(s3acl_only=True)
    def test_bucket_PUT_with_canned_s3acl(self):
        """With s3_acl enabled, the canned ACL is stored as sysmeta rather
        than a native Swift container ACL."""
        account = 'test:tester'
        acl = \
            encode_acl('container', ACLPublicRead(Owner(account, account)))
        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header(),
                                     'X-Amz-Acl': 'public-read'})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '200')
        _, _, headers = self.swift.calls_with_headers[-1]
        self.assertNotIn('X-Container-Read', headers)
        self.assertIn('X-Container-Sysmeta-S3api-Acl', headers)
        self.assertEqual(headers.get('X-Container-Sysmeta-S3api-Acl'),
                         acl['x-container-sysmeta-s3api-acl'])
|
||||
|
||||
    @s3acl
    def test_bucket_PUT_with_location_error(self):
        """An unknown LocationConstraint is rejected."""
        elem = Element('CreateBucketConfiguration')
        SubElement(elem, 'LocationConstraint').text = 'XXX'
        xml = tostring(elem)

        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()},
                            body=xml)
        status, headers, body = self.call_s3api(req)
        self.assertEqual(self._get_error_code(body),
                         'InvalidLocationConstraint')

    @s3acl
    def test_bucket_PUT_with_location_invalid_xml(self):
        """An unparsable request body is rejected as MalformedXML."""
        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()},
                            body='invalid_xml')
        status, headers, body = self.call_s3api(req)
        self.assertEqual(self._get_error_code(body), 'MalformedXML')
|
||||
|
||||
    def _test_method_error_delete(self, path, sw_resp):
        """Register a HEAD response for *path* then run the DELETE error
        helper, returning the S3 error code."""
        self.swift.register('HEAD', '/v1/AUTH_test' + path, sw_resp, {}, None)
        return self._test_method_error('DELETE', path, sw_resp)

    @s3acl
    def test_bucket_DELETE_error(self):
        """Backend errors on DELETE map to the matching S3 error codes."""
        code = self._test_method_error_delete('/bucket', swob.HTTPUnauthorized)
        self.assertEqual(code, 'SignatureDoesNotMatch')
        code = self._test_method_error_delete('/bucket', swob.HTTPForbidden)
        self.assertEqual(code, 'AccessDenied')
        code = self._test_method_error_delete('/bucket', swob.HTTPNotFound)
        self.assertEqual(code, 'NoSuchBucket')
        code = self._test_method_error_delete('/bucket', swob.HTTPServerError)
        self.assertEqual(code, 'InternalError')

        # bucket not empty is now validated at s3api
        self.swift.register('HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent,
                            {'X-Container-Object-Count': '1'}, None)
        code = self._test_method_error('DELETE', '/bucket', swob.HTTPConflict)
        self.assertEqual(code, 'BucketNotEmpty')
|
||||
|
||||
    @s3acl
    def test_bucket_DELETE(self):
        """Deleting an empty bucket returns 204."""
        # overwrite default HEAD to return x-container-object-count
        self.swift.register(
            'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent,
            {'X-Container-Object-Count': 0}, None)

        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '204')

    @s3acl
    def test_bucket_DELETE_error_while_segment_bucket_delete(self):
        """If segment cleanup fails, the original bucket must survive."""
        # An error occurred while deleting segment objects
        self.swift.register('DELETE', '/v1/AUTH_test/bucket+segments/lily',
                            swob.HTTPServiceUnavailable, {}, json.dumps([]))
        # overwrite default HEAD to return x-container-object-count
        self.swift.register(
            'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent,
            {'X-Container-Object-Count': 0}, None)

        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        self.assertEqual(status.split()[0], '503')
        called = [(method, path) for method, path, _ in
                  self.swift.calls_with_headers]
        # Don't delete original bucket when error occurred in segment container
        self.assertNotIn(('DELETE', '/v1/AUTH_test/bucket'), called)
|
||||
|
||||
    def _test_bucket_for_s3acl(self, method, account):
        """Issue *method* on /bucket as *account* and return the s3api
        (status, headers, body) triple."""
        req = Request.blank('/bucket',
                            environ={'REQUEST_METHOD': method},
                            headers={'Authorization': 'AWS %s:hmac' % account,
                                     'Date': self.get_date_header()})

        return self.call_s3api(req)

    @s3acl(s3acl_only=True)
    def test_bucket_GET_without_permission(self):
        """An unrelated account gets AccessDenied on bucket GET."""
        status, headers, body = self._test_bucket_for_s3acl('GET',
                                                            'test:other')
        self.assertEqual(self._get_error_code(body), 'AccessDenied')

    @s3acl(s3acl_only=True)
    def test_bucket_GET_with_read_permission(self):
        """READ permission is sufficient for bucket GET."""
        status, headers, body = self._test_bucket_for_s3acl('GET',
                                                            'test:read')
        self.assertEqual(status.split()[0], '200')

    @s3acl(s3acl_only=True)
    def test_bucket_GET_with_fullcontrol_permission(self):
        """FULL_CONTROL permission allows bucket GET."""
        status, headers, body = \
            self._test_bucket_for_s3acl('GET', 'test:full_control')
        self.assertEqual(status.split()[0], '200')

    @s3acl(s3acl_only=True)
    def test_bucket_GET_with_owner_permission(self):
        """The bucket owner can always GET the bucket."""
        status, headers, body = self._test_bucket_for_s3acl('GET',
                                                            'test:tester')
        self.assertEqual(status.split()[0], '200')
|
||||
|
||||
def _test_bucket_GET_canned_acl(self, bucket):
|
||||
req = Request.blank('/%s' % bucket,
|
||||
environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'Authorization': 'AWS test:tester:hmac',
|
||||
'Date': self.get_date_header()})
|
||||
|
||||
return self.call_s3api(req)
|
||||
|
||||
@s3acl(s3acl_only=True)
|
||||
def test_bucket_GET_authenticated_users(self):
|
||||
status, headers, body = \
|
||||
self._test_bucket_GET_canned_acl('authenticated')
|
||||
self.assertEqual(status.split()[0], '200')
|
||||
|
||||
@s3acl(s3acl_only=True)
|
||||
def test_bucket_GET_all_users(self):
|
||||
status, headers, body = self._test_bucket_GET_canned_acl('public')
|
||||
self.assertEqual(status.split()[0], '200')
|
||||
|
||||
@s3acl(s3acl_only=True)
|
||||
def test_bucket_DELETE_without_permission(self):
|
||||
status, headers, body = self._test_bucket_for_s3acl('DELETE',
|
||||
'test:other')
|
||||
self.assertEqual(self._get_error_code(body), 'AccessDenied')
|
||||
# Don't delete anything in backend Swift
|
||||
called = [method for method, _, _ in self.swift.calls_with_headers]
|
||||
self.assertNotIn('DELETE', called)
|
||||
|
||||
@s3acl(s3acl_only=True)
|
||||
def test_bucket_DELETE_with_write_permission(self):
|
||||
status, headers, body = self._test_bucket_for_s3acl('DELETE',
|
||||
'test:write')
|
||||
self.assertEqual(self._get_error_code(body), 'AccessDenied')
|
||||
# Don't delete anything in backend Swift
|
||||
called = [method for method, _, _ in self.swift.calls_with_headers]
|
||||
self.assertNotIn('DELETE', called)
|
||||
|
||||
@s3acl(s3acl_only=True)
|
||||
def test_bucket_DELETE_with_fullcontrol_permission(self):
|
||||
status, headers, body = \
|
||||
self._test_bucket_for_s3acl('DELETE', 'test:full_control')
|
||||
self.assertEqual(self._get_error_code(body), 'AccessDenied')
|
||||
# Don't delete anything in backend Swift
|
||||
called = [method for method, _, _ in self.swift.calls_with_headers]
|
||||
self.assertNotIn('DELETE', called)
|
||||
|
||||
|
||||
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()
|
44
test/unit/common/middleware/s3api/test_cfg.py
Normal file
44
test/unit/common/middleware/s3api/test_cfg.py
Normal file
@ -0,0 +1,44 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from swift.common.middleware.s3api.utils import Config
|
||||
|
||||
|
||||
class TestS3ApiCfg(unittest.TestCase):
    """Tests for swift.common.middleware.s3api.utils.Config."""

    def test_config(self):
        # update() coerces incoming string values to the types of the
        # values already stored under the same keys.
        conf = Config({'a': 'str', 'b': 10, 'c': True})
        conf.update({'a': 'str2', 'b': '100', 'c': 'false'})

        self.assertEqual('str2', conf['a'])  # str stays str
        self.assertEqual(100, conf['b'])     # '100' parsed to int
        self.assertEqual(False, conf['c'])   # 'false' parsed to bool
|
||||
|
||||
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()
|
73
test/unit/common/middleware/s3api/test_etree.py
Normal file
73
test/unit/common/middleware/s3api/test_etree.py
Normal file
@ -0,0 +1,73 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from swift.common.middleware.s3api import etree
|
||||
|
||||
|
||||
class TestS3ApiEtree(unittest.TestCase):
    """Tests for the namespace-aware XML helpers in s3api's etree
    wrapper module."""

    def test_xml_namespace(self):
        def build_doc(ns, prefix):
            return '<A %(ns)s><%(prefix)sB>C</%(prefix)sB></A>' % \
                ({'ns': ns, 'prefix': prefix})

        # Declarations under which <B> is found as if unqualified: no
        # namespace at all, the S3 namespace (default or prefixed), and
        # any *default* foreign namespace.
        findable = [
            ('', ''),
            ('xmlns="%s"' % etree.XMLNS_S3, ''),
            ('xmlns:s3="%s"' % etree.XMLNS_S3, 's3:'),
            ('xmlns="http://example.com/"', ''),
        ]
        for ns_decl, tag_prefix in findable:
            root = etree.fromstring(build_doc(ns_decl, tag_prefix))
            self.assertEqual('C', root.find('./B').text)

        # A *prefixed* foreign namespace is kept, so the unqualified
        # name does not match.
        root = etree.fromstring(
            build_doc('xmlns:s3="http://example.com/"', 's3:'))
        self.assertIsNone(root.find('./B'))

    def test_xml_with_comments(self):
        # Comment nodes must not disturb child lookup.
        root = etree.fromstring('<A><!-- comment --><B>C</B></A>')
        self.assertEqual('C', root.find('./B').text)

    def test_tostring_with_nonascii_text(self):
        root = etree.Element('Test')
        child = etree.SubElement(root, 'FOO')
        child.text = '\xef\xbc\xa1'
        self.assertIsInstance(child.text, str)
        # Serializing non-ASCII text must still yield a native str.
        self.assertIsInstance(etree.tostring(root), str)

    def test_fromstring_with_nonascii_text(self):
        doc = '<?xml version="1.0" encoding="UTF-8"?>\n' \
            '<Test><FOO>\xef\xbc\xa1</FOO></Test>'
        # Parsing must round-trip non-ASCII text back as a native str.
        text = etree.fromstring(doc).find('FOO').text
        self.assertEqual(text, '\xef\xbc\xa1')
        self.assertIsInstance(text, str)
|
||||
|
||||
|
||||
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()
|
69
test/unit/common/middleware/s3api/test_helpers.py
Normal file
69
test/unit/common/middleware/s3api/test_helpers.py
Normal file
@ -0,0 +1,69 @@
|
||||
# Copyright (c) 2013 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This stuff can't live in test/unit/__init__.py due to its swob dependency.
|
||||
|
||||
import unittest
|
||||
from test.unit.common.middleware.s3api.helpers import FakeSwift
|
||||
from swift.common.middleware.s3api.utils import sysmeta_header
|
||||
from swift.common.swob import HeaderKeyDict
|
||||
from mock import MagicMock
|
||||
|
||||
|
||||
class S3ApiHelperTestCase(unittest.TestCase):
    """Exercise the sysmeta-preserving behaviour of the FakeSwift
    test helper's register()."""

    def setUp(self):
        self.method = 'HEAD'
        self.path = '/v1/AUTH_test/bucket'

    def _check_headers(self, swift, method, path, headers):
        # Look straight into FakeSwift's internal response table.
        _, registered_headers, _ = swift._responses[(method, path)]
        self.assertEqual(headers, registered_headers)

    def _make_sysmeta_headers(self):
        # One s3api sysmeta entry plus ordinary container metadata.
        headers = HeaderKeyDict()
        headers.update({sysmeta_header('container', 'acl'): 'test',
                        'x-container-meta-foo': 'bar'})
        return headers

    def test_fake_swift_sysmeta(self):
        swift = FakeSwift()
        orig_headers = self._make_sysmeta_headers()
        swift.register(self.method, self.path, MagicMock(), orig_headers, None)
        self._check_headers(swift, self.method, self.path, orig_headers)

        # Re-registering *without* the sysmeta header keeps the
        # previously registered sysmeta value.
        new_headers = orig_headers.copy()
        del new_headers[sysmeta_header('container', 'acl').title()]
        swift.register(self.method, self.path, MagicMock(), new_headers, None)
        self._check_headers(swift, self.method, self.path, orig_headers)

    def test_fake_swift_sysmeta_overwrite(self):
        swift = FakeSwift()
        orig_headers = self._make_sysmeta_headers()
        swift.register(self.method, self.path, MagicMock(), orig_headers, None)
        self._check_headers(swift, self.method, self.path, orig_headers)

        # Re-registering *with* a new sysmeta value replaces the old
        # one.
        new_headers = orig_headers.copy()
        new_headers[sysmeta_header('container', 'acl').title()] = 'bar'
        swift.register(self.method, self.path, MagicMock(), new_headers, None)
        self.assertFalse(orig_headers == new_headers)
        self._check_headers(swift, self.method, self.path, new_headers)
|
||||
|
||||
|
||||
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user