Update SAIO & docker image to use 62xx ports
Note that existing SAIOs with 60xx ports should still work fine.

Change-Id: If5dd79f926fa51a58b3a732b212b484a7e9f00db
Related-Change: Ie1c778b159792c8e259e2a54cb86051686ac9d18
commit 314347a3cb (parent 0eee3b5bcd)
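For readers tracking the renumbering, every port in this change moves up by exactly 200 (6010 becomes 6210, 6021 becomes 6221, 6052 becomes 6252, and so on). A minimal illustrative sketch, not part of the change itself; the helper name is hypothetical:

    # Hypothetical helper illustrating the SAIO port renumbering in this commit:
    # only the 60xx prefix changes; the service digit and node digit stay the same.
    def to_62xx(old_port):
        """Map an old-style 60xx SAIO port to the new 62xx scheme."""
        return old_port + 200

    assert to_62xx(6010) == 6210  # object server, node 1
    assert to_62xx(6021) == 6221  # container server, node 2
    assert to_62xx(6032) == 6232  # account server, node 3
    assert to_62xx(6052) == 6252  # object replication port, node 1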
@@ -7,36 +7,36 @@ cd /etc/swift
 rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz

 swift-ring-builder object.builder create 10 3 1
-swift-ring-builder object.builder add r1z1-127.0.0.1:6010/sdb1 1
-swift-ring-builder object.builder add r1z2-127.0.0.2:6020/sdb2 1
-swift-ring-builder object.builder add r1z3-127.0.0.3:6030/sdb3 1
-swift-ring-builder object.builder add r1z4-127.0.0.4:6040/sdb4 1
+swift-ring-builder object.builder add r1z1-127.0.0.1:6210/sdb1 1
+swift-ring-builder object.builder add r1z2-127.0.0.2:6220/sdb2 1
+swift-ring-builder object.builder add r1z3-127.0.0.3:6230/sdb3 1
+swift-ring-builder object.builder add r1z4-127.0.0.4:6240/sdb4 1
 swift-ring-builder object.builder rebalance
 swift-ring-builder object-1.builder create 10 2 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6010/sdb1 1
-swift-ring-builder object-1.builder add r1z2-127.0.0.2:6020/sdb2 1
-swift-ring-builder object-1.builder add r1z3-127.0.0.3:6030/sdb3 1
-swift-ring-builder object-1.builder add r1z4-127.0.0.4:6040/sdb4 1
+swift-ring-builder object-1.builder add r1z1-127.0.0.1:6210/sdb1 1
+swift-ring-builder object-1.builder add r1z2-127.0.0.2:6220/sdb2 1
+swift-ring-builder object-1.builder add r1z3-127.0.0.3:6230/sdb3 1
+swift-ring-builder object-1.builder add r1z4-127.0.0.4:6240/sdb4 1
 swift-ring-builder object-1.builder rebalance
 swift-ring-builder object-2.builder create 10 6 1
-swift-ring-builder object-2.builder add r1z1-127.0.0.1:6010/sdb1 1
-swift-ring-builder object-2.builder add r1z1-127.0.0.1:6010/sdb5 1
-swift-ring-builder object-2.builder add r1z2-127.0.0.2:6020/sdb2 1
-swift-ring-builder object-2.builder add r1z2-127.0.0.2:6020/sdb6 1
-swift-ring-builder object-2.builder add r1z3-127.0.0.3:6030/sdb3 1
-swift-ring-builder object-2.builder add r1z3-127.0.0.3:6030/sdb7 1
-swift-ring-builder object-2.builder add r1z4-127.0.0.4:6040/sdb4 1
-swift-ring-builder object-2.builder add r1z4-127.0.0.4:6040/sdb8 1
+swift-ring-builder object-2.builder add r1z1-127.0.0.1:6210/sdb1 1
+swift-ring-builder object-2.builder add r1z1-127.0.0.1:6210/sdb5 1
+swift-ring-builder object-2.builder add r1z2-127.0.0.2:6220/sdb2 1
+swift-ring-builder object-2.builder add r1z2-127.0.0.2:6220/sdb6 1
+swift-ring-builder object-2.builder add r1z3-127.0.0.3:6230/sdb3 1
+swift-ring-builder object-2.builder add r1z3-127.0.0.3:6230/sdb7 1
+swift-ring-builder object-2.builder add r1z4-127.0.0.4:6240/sdb4 1
+swift-ring-builder object-2.builder add r1z4-127.0.0.4:6240/sdb8 1
 swift-ring-builder object-2.builder rebalance
 swift-ring-builder container.builder create 10 3 1
-swift-ring-builder container.builder add r1z1-127.0.0.1:6011/sdb1 1
-swift-ring-builder container.builder add r1z2-127.0.0.2:6021/sdb2 1
-swift-ring-builder container.builder add r1z3-127.0.0.3:6031/sdb3 1
-swift-ring-builder container.builder add r1z4-127.0.0.4:6041/sdb4 1
+swift-ring-builder container.builder add r1z1-127.0.0.1:6211/sdb1 1
+swift-ring-builder container.builder add r1z2-127.0.0.2:6221/sdb2 1
+swift-ring-builder container.builder add r1z3-127.0.0.3:6231/sdb3 1
+swift-ring-builder container.builder add r1z4-127.0.0.4:6241/sdb4 1
 swift-ring-builder container.builder rebalance
 swift-ring-builder account.builder create 10 3 1
-swift-ring-builder account.builder add r1z1-127.0.0.1:6012/sdb1 1
-swift-ring-builder account.builder add r1z2-127.0.0.2:6022/sdb2 1
-swift-ring-builder account.builder add r1z3-127.0.0.3:6032/sdb3 1
-swift-ring-builder account.builder add r1z4-127.0.0.4:6042/sdb4 1
+swift-ring-builder account.builder add r1z1-127.0.0.1:6212/sdb1 1
+swift-ring-builder account.builder add r1z2-127.0.0.2:6222/sdb2 1
+swift-ring-builder account.builder add r1z3-127.0.0.3:6232/sdb3 1
+swift-ring-builder account.builder add r1z4-127.0.0.4:6242/sdb4 1
 swift-ring-builder account.builder rebalance
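After rebuilding the rings as in the hunk above, the device ports can be double-checked from Python. A minimal sketch, assuming the rings were written to /etc/swift and using the standard swift.common.ring.Ring reader:

    # Sketch: confirm every device in the rebuilt SAIO rings uses a 62xx port.
    from swift.common.ring import Ring

    for name in ('object', 'object-1', 'object-2', 'container', 'account'):
        ring = Ring('/etc/swift', ring_name=name)
        ports = sorted({dev['port'] for dev in ring.devs if dev})
        assert all(6200 <= p < 6300 for p in ports), (name, ports)
        print(name, ports)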
@@ -4,74 +4,74 @@ log file = /var/log/rsyncd.log
 pid file = /var/run/rsyncd.pid
 address = 0.0.0.0

-[account6012]
+[account6212]
 max connections = 25
 path = /srv/1/node/
 read only = false
-lock file = /var/lock/account6012.lock
+lock file = /var/lock/account6212.lock

-[account6022]
+[account6222]
 max connections = 25
 path = /srv/2/node/
 read only = false
-lock file = /var/lock/account6022.lock
+lock file = /var/lock/account6222.lock

-[account6032]
+[account6232]
 max connections = 25
 path = /srv/3/node/
 read only = false
-lock file = /var/lock/account6032.lock
+lock file = /var/lock/account6232.lock

-[account6042]
+[account6242]
 max connections = 25
 path = /srv/4/node/
 read only = false
-lock file = /var/lock/account6042.lock
+lock file = /var/lock/account6242.lock

-[container6011]
+[container6211]
 max connections = 25
 path = /srv/1/node/
 read only = false
-lock file = /var/lock/container6011.lock
+lock file = /var/lock/container6211.lock

-[container6021]
+[container6221]
 max connections = 25
 path = /srv/2/node/
 read only = false
-lock file = /var/lock/container6021.lock
+lock file = /var/lock/container6221.lock

-[container6031]
+[container6231]
 max connections = 25
 path = /srv/3/node/
 read only = false
-lock file = /var/lock/container6031.lock
+lock file = /var/lock/container6231.lock

-[container6041]
+[container6241]
 max connections = 25
 path = /srv/4/node/
 read only = false
-lock file = /var/lock/container6041.lock
+lock file = /var/lock/container6241.lock

-[object6010]
+[object6210]
 max connections = 25
 path = /srv/1/node/
 read only = false
-lock file = /var/lock/object6010.lock
+lock file = /var/lock/object6210.lock

-[object6020]
+[object6220]
 max connections = 25
 path = /srv/2/node/
 read only = false
-lock file = /var/lock/object6020.lock
+lock file = /var/lock/object6220.lock

-[object6030]
+[object6230]
 max connections = 25
 path = /srv/3/node/
 read only = false
-lock file = /var/lock/object6030.lock
+lock file = /var/lock/object6230.lock

-[object6040]
+[object6240]
 max connections = 25
 path = /srv/4/node/
 read only = false
-lock file = /var/lock/object6040.lock
+lock file = /var/lock/object6240.lock
@@ -3,7 +3,7 @@ devices = /srv/1/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.1
-bind_port = 6012
+bind_port = 6212
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL2
@@ -3,7 +3,7 @@ devices = /srv/2/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.2
-bind_port = 6022
+bind_port = 6222
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL3
@@ -3,7 +3,7 @@ devices = /srv/3/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.3
-bind_port = 6032
+bind_port = 6232
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL4
@@ -3,7 +3,7 @@ devices = /srv/4/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.4
-bind_port = 6042
+bind_port = 6242
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL5
@@ -3,7 +3,7 @@ devices = /srv/1/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.1
-bind_port = 6011
+bind_port = 6211
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL2
@@ -3,7 +3,7 @@ devices = /srv/2/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.2
-bind_port = 6021
+bind_port = 6221
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL3
@@ -3,7 +3,7 @@ devices = /srv/3/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.3
-bind_port = 6031
+bind_port = 6231
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL4
@@ -3,7 +3,7 @@ devices = /srv/4/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.4
-bind_port = 6041
+bind_port = 6241
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL5
@@ -3,7 +3,7 @@ devices = /srv/1/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.1
-bind_port = 6010
+bind_port = 6210
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL2
@@ -3,7 +3,7 @@ devices = /srv/2/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.2
-bind_port = 6020
+bind_port = 6220
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL3
@@ -3,7 +3,7 @@ devices = /srv/3/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.3
-bind_port = 6030
+bind_port = 6230
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL4
@@ -3,7 +3,7 @@ devices = /srv/4/node
 mount_check = false
 disable_fallocate = true
 bind_ip = 127.0.0.4
-bind_port = 6040
+bind_port = 6240
 workers = 1
 user = <your-user-name>
 log_facility = LOG_LOCAL5
@@ -717,7 +717,7 @@ Once the recon middleware is enabled, a GET request for
 "/recon/<metric>" to the backend object server will return a
 JSON-formatted response::

-fhines@ubuntu:~$ curl -i http://localhost:6030/recon/async
+fhines@ubuntu:~$ curl -i http://localhost:6230/recon/async
 HTTP/1.1 200 OK
 Content-Type: application/json
 Content-Length: 20
@@ -727,7 +727,7 @@ JSON-formatted response::

 Note that the default port for the object server is 6200, except on a
-Swift All-In-One installation, which uses 6010, 6020, 6030, and 6040.
+Swift All-In-One installation, which uses 6210, 6220, 6230, and 6240.

 The following metrics and telemetry are currently exposed:

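To complement the curl example above, the same recon endpoint can be queried programmatically. A minimal sketch, assuming a SAIO object server for node 3 listening on the new 6230 port:

    # Sketch: fetch async-pending stats from a backend object server's recon endpoint.
    import json
    from urllib.request import urlopen

    with urlopen('http://127.0.0.1:6230/recon/async') as resp:
        print(json.load(resp))  # e.g. {"async_pending": 0}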
@@ -385,18 +385,18 @@ Setting up rsync

 You should see the following output from the above command::

-account6012
-account6022
-account6032
-account6042
-container6011
-container6021
-container6031
-container6041
-object6010
-object6020
-object6030
-object6040
+account6212
+account6222
+account6232
+account6242
+container6211
+container6221
+container6231
+container6241
+object6210
+object6220
+object6230
+object6240

 ------------------
 Starting memcached
@@ -670,34 +670,34 @@ Constructing initial rings

 .. code-block:: console

-Device d0r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb1_"" with 1.0 weight got id 0
-Device d1r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb2_"" with 1.0 weight got id 1
-Device d2r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb3_"" with 1.0 weight got id 2
-Device d3r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb4_"" with 1.0 weight got id 3
+Device d0r1z1-127.0.0.1:6210R127.0.0.1:6210/sdb1_"" with 1.0 weight got id 0
+Device d1r1z2-127.0.0.2:6220R127.0.0.2:6220/sdb2_"" with 1.0 weight got id 1
+Device d2r1z3-127.0.0.3:6230R127.0.0.3:6230/sdb3_"" with 1.0 weight got id 2
+Device d3r1z4-127.0.0.4:6240R127.0.0.4:6240/sdb4_"" with 1.0 weight got id 3
 Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
-Device d0r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb1_"" with 1.0 weight got id 0
-Device d1r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb2_"" with 1.0 weight got id 1
-Device d2r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb3_"" with 1.0 weight got id 2
-Device d3r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb4_"" with 1.0 weight got id 3
+Device d0r1z1-127.0.0.1:6210R127.0.0.1:6210/sdb1_"" with 1.0 weight got id 0
+Device d1r1z2-127.0.0.2:6220R127.0.0.2:6220/sdb2_"" with 1.0 weight got id 1
+Device d2r1z3-127.0.0.3:6230R127.0.0.3:6230/sdb3_"" with 1.0 weight got id 2
+Device d3r1z4-127.0.0.4:6240R127.0.0.4:6240/sdb4_"" with 1.0 weight got id 3
 Reassigned 2048 (200.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
-Device d0r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb1_"" with 1.0 weight got id 0
-Device d1r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb5_"" with 1.0 weight got id 1
-Device d2r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb2_"" with 1.0 weight got id 2
-Device d3r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb6_"" with 1.0 weight got id 3
-Device d4r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb3_"" with 1.0 weight got id 4
-Device d5r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb7_"" with 1.0 weight got id 5
-Device d6r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb4_"" with 1.0 weight got id 6
-Device d7r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb8_"" with 1.0 weight got id 7
+Device d0r1z1-127.0.0.1:6210R127.0.0.1:6210/sdb1_"" with 1.0 weight got id 0
+Device d1r1z1-127.0.0.1:6210R127.0.0.1:6210/sdb5_"" with 1.0 weight got id 1
+Device d2r1z2-127.0.0.2:6220R127.0.0.2:6220/sdb2_"" with 1.0 weight got id 2
+Device d3r1z2-127.0.0.2:6220R127.0.0.2:6220/sdb6_"" with 1.0 weight got id 3
+Device d4r1z3-127.0.0.3:6230R127.0.0.3:6230/sdb3_"" with 1.0 weight got id 4
+Device d5r1z3-127.0.0.3:6230R127.0.0.3:6230/sdb7_"" with 1.0 weight got id 5
+Device d6r1z4-127.0.0.4:6240R127.0.0.4:6240/sdb4_"" with 1.0 weight got id 6
+Device d7r1z4-127.0.0.4:6240R127.0.0.4:6240/sdb8_"" with 1.0 weight got id 7
 Reassigned 6144 (600.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
-Device d0r1z1-127.0.0.1:6011R127.0.0.1:6011/sdb1_"" with 1.0 weight got id 0
-Device d1r1z2-127.0.0.2:6021R127.0.0.2:6021/sdb2_"" with 1.0 weight got id 1
-Device d2r1z3-127.0.0.3:6031R127.0.0.3:6031/sdb3_"" with 1.0 weight got id 2
-Device d3r1z4-127.0.0.4:6041R127.0.0.4:6041/sdb4_"" with 1.0 weight got id 3
+Device d0r1z1-127.0.0.1:6211R127.0.0.1:6211/sdb1_"" with 1.0 weight got id 0
+Device d1r1z2-127.0.0.2:6221R127.0.0.2:6221/sdb2_"" with 1.0 weight got id 1
+Device d2r1z3-127.0.0.3:6231R127.0.0.3:6231/sdb3_"" with 1.0 weight got id 2
+Device d3r1z4-127.0.0.4:6241R127.0.0.4:6241/sdb4_"" with 1.0 weight got id 3
 Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
-Device d0r1z1-127.0.0.1:6012R127.0.0.1:6012/sdb1_"" with 1.0 weight got id 0
-Device d1r1z2-127.0.0.2:6022R127.0.0.2:6022/sdb2_"" with 1.0 weight got id 1
-Device d2r1z3-127.0.0.3:6032R127.0.0.3:6032/sdb3_"" with 1.0 weight got id 2
-Device d3r1z4-127.0.0.4:6042R127.0.0.4:6042/sdb4_"" with 1.0 weight got id 3
+Device d0r1z1-127.0.0.1:6212R127.0.0.1:6212/sdb1_"" with 1.0 weight got id 0
+Device d1r1z2-127.0.0.2:6222R127.0.0.2:6222/sdb2_"" with 1.0 weight got id 1
+Device d2r1z3-127.0.0.3:6232R127.0.0.3:6232/sdb3_"" with 1.0 weight got id 2
+Device d3r1z4-127.0.0.4:6242R127.0.0.4:6242/sdb4_"" with 1.0 weight got id 3
 Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
@@ -40,10 +40,10 @@ to implement a usable set of policies.
 these changes)::

 swift-ring-builder object-1.builder create 10 2 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6010/sdb1 1
-swift-ring-builder object-1.builder add r1z2-127.0.0.1:6020/sdb2 1
-swift-ring-builder object-1.builder add r1z3-127.0.0.1:6030/sdb3 1
-swift-ring-builder object-1.builder add r1z4-127.0.0.1:6040/sdb4 1
+swift-ring-builder object-1.builder add r1z1-127.0.0.1:6210/sdb1 1
+swift-ring-builder object-1.builder add r1z2-127.0.0.1:6220/sdb2 1
+swift-ring-builder object-1.builder add r1z3-127.0.0.1:6230/sdb3 1
+swift-ring-builder object-1.builder add r1z4-127.0.0.1:6240/sdb4 1
 swift-ring-builder object-1.builder rebalance

 Note that the reduced replication of the silver policy is only a function
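If it helps to confirm which ring the reduced-replication 'silver' policy resolves to after these changes, the policy-to-ring mapping can be inspected from Python. A minimal sketch, assuming /etc/swift/swift.conf already defines the policy and using swift.common.storage_policy:

    # Sketch: the 'silver' policy should map to the object-1 ring whose devices
    # were just added on the 62xx ports above.
    from swift.common.storage_policy import POLICIES

    silver = POLICIES.get_by_name('silver')
    print(silver.idx, silver.ring_name)  # expected: 1 object-1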
@@ -101,9 +101,9 @@ Storage Policies effect placement of data in Swift.

 You should see this: (note placement on expected devices)::

-["http://127.0.0.1:6030/sdb3/761/AUTH_test/myCont0/file0.txt",
-"http://127.0.0.1:6010/sdb1/761/AUTH_test/myCont0/file0.txt",
-"http://127.0.0.1:6020/sdb2/761/AUTH_test/myCont0/file0.txt"]
+["http://127.0.0.1:6230/sdb3/761/AUTH_test/myCont0/file0.txt",
+"http://127.0.0.1:6210/sdb1/761/AUTH_test/myCont0/file0.txt",
+"http://127.0.0.1:6220/sdb2/761/AUTH_test/myCont0/file0.txt"]

 5. Create a container using policy 'silver' and put a different file in it::

@@ -119,8 +119,8 @@ Storage Policies effect placement of data in Swift.

 You should see this: (note placement on expected devices)::

-["http://127.0.0.1:6010/sdb1/32/AUTH_test/myCont1/file1.txt",
-"http://127.0.0.1:6040/sdb4/32/AUTH_test/myCont1/file1.txt"]
+["http://127.0.0.1:6210/sdb1/32/AUTH_test/myCont1/file1.txt",
+"http://127.0.0.1:6240/sdb4/32/AUTH_test/myCont1/file1.txt"]

 7. Confirm account information with HEAD, make sure that your container-updater
 service is running and has executed once since you performed the PUTs or the
@@ -38,38 +38,38 @@ For SAIO replication
 cd /etc/swift
 rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
 swift-ring-builder object.builder create 10 3 1
-swift-ring-builder object.builder add z1-127.0.0.1:6010R127.0.0.1:6050/sdb1 1
-swift-ring-builder object.builder add z2-127.0.0.1:6020R127.0.0.1:6060/sdb2 1
-swift-ring-builder object.builder add z3-127.0.0.1:6030R127.0.0.1:6070/sdb3 1
-swift-ring-builder object.builder add z4-127.0.0.1:6040R127.0.0.1:6080/sdb4 1
+swift-ring-builder object.builder add z1-127.0.0.1:6210R127.0.0.1:6250/sdb1 1
+swift-ring-builder object.builder add z2-127.0.0.1:6220R127.0.0.1:6260/sdb2 1
+swift-ring-builder object.builder add z3-127.0.0.1:6230R127.0.0.1:6270/sdb3 1
+swift-ring-builder object.builder add z4-127.0.0.1:6240R127.0.0.1:6280/sdb4 1
 swift-ring-builder object.builder rebalance
 swift-ring-builder object-1.builder create 10 2 1
-swift-ring-builder object-1.builder add z1-127.0.0.1:6010R127.0.0.1:6050/sdb1 1
-swift-ring-builder object-1.builder add z2-127.0.0.1:6020R127.0.0.1:6060/sdb2 1
-swift-ring-builder object-1.builder add z3-127.0.0.1:6030R127.0.0.1:6070/sdb3 1
-swift-ring-builder object-1.builder add z4-127.0.0.1:6040R127.0.0.1:6080/sdb4 1
+swift-ring-builder object-1.builder add z1-127.0.0.1:6210R127.0.0.1:6250/sdb1 1
+swift-ring-builder object-1.builder add z2-127.0.0.1:6220R127.0.0.1:6260/sdb2 1
+swift-ring-builder object-1.builder add z3-127.0.0.1:6230R127.0.0.1:6270/sdb3 1
+swift-ring-builder object-1.builder add z4-127.0.0.1:6240R127.0.0.1:6280/sdb4 1
 swift-ring-builder object-1.builder rebalance
 swift-ring-builder object-2.builder create 10 6 1
-swift-ring-builder object-2.builder add z1-127.0.0.1:6010R127.0.0.1:6050/sdb1 1
-swift-ring-builder object-2.builder add z1-127.0.0.1:6010R127.0.0.1:6050/sdb5 1
-swift-ring-builder object-2.builder add z2-127.0.0.1:6020R127.0.0.1:6060/sdb2 1
-swift-ring-builder object-2.builder add z2-127.0.0.1:6020R127.0.0.1:6060/sdb6 1
-swift-ring-builder object-2.builder add z3-127.0.0.1:6030R127.0.0.1:6070/sdb3 1
-swift-ring-builder object-2.builder add z3-127.0.0.1:6030R127.0.0.1:6070/sdb7 1
-swift-ring-builder object-2.builder add z4-127.0.0.1:6040R127.0.0.1:6080/sdb4 1
-swift-ring-builder object-2.builder add z4-127.0.0.1:6040R127.0.0.1:6080/sdb8 1
+swift-ring-builder object-2.builder add z1-127.0.0.1:6210R127.0.0.1:6250/sdb1 1
+swift-ring-builder object-2.builder add z1-127.0.0.1:6210R127.0.0.1:6250/sdb5 1
+swift-ring-builder object-2.builder add z2-127.0.0.1:6220R127.0.0.1:6260/sdb2 1
+swift-ring-builder object-2.builder add z2-127.0.0.1:6220R127.0.0.1:6260/sdb6 1
+swift-ring-builder object-2.builder add z3-127.0.0.1:6230R127.0.0.1:6270/sdb3 1
+swift-ring-builder object-2.builder add z3-127.0.0.1:6230R127.0.0.1:6270/sdb7 1
+swift-ring-builder object-2.builder add z4-127.0.0.1:6240R127.0.0.1:6280/sdb4 1
+swift-ring-builder object-2.builder add z4-127.0.0.1:6240R127.0.0.1:6280/sdb8 1
 swift-ring-builder object-2.builder rebalance
 swift-ring-builder container.builder create 10 3 1
-swift-ring-builder container.builder add z1-127.0.0.1:6011R127.0.0.1:6051/sdb1 1
-swift-ring-builder container.builder add z2-127.0.0.1:6021R127.0.0.1:6061/sdb2 1
-swift-ring-builder container.builder add z3-127.0.0.1:6031R127.0.0.1:6071/sdb3 1
-swift-ring-builder container.builder add z4-127.0.0.1:6041R127.0.0.1:6081/sdb4 1
+swift-ring-builder container.builder add z1-127.0.0.1:6211R127.0.0.1:6251/sdb1 1
+swift-ring-builder container.builder add z2-127.0.0.1:6221R127.0.0.1:6261/sdb2 1
+swift-ring-builder container.builder add z3-127.0.0.1:6231R127.0.0.1:6271/sdb3 1
+swift-ring-builder container.builder add z4-127.0.0.1:6241R127.0.0.1:6281/sdb4 1
 swift-ring-builder container.builder rebalance
 swift-ring-builder account.builder create 10 3 1
-swift-ring-builder account.builder add z1-127.0.0.1:6012R127.0.0.1:6052/sdb1 1
-swift-ring-builder account.builder add z2-127.0.0.1:6022R127.0.0.1:6062/sdb2 1
-swift-ring-builder account.builder add z3-127.0.0.1:6032R127.0.0.1:6072/sdb3 1
-swift-ring-builder account.builder add z4-127.0.0.1:6042R127.0.0.1:6082/sdb4 1
+swift-ring-builder account.builder add z1-127.0.0.1:6212R127.0.0.1:6252/sdb1 1
+swift-ring-builder account.builder add z2-127.0.0.1:6222R127.0.0.1:6262/sdb2 1
+swift-ring-builder account.builder add z3-127.0.0.1:6232R127.0.0.1:6272/sdb3 1
+swift-ring-builder account.builder add z4-127.0.0.1:6242R127.0.0.1:6282/sdb4 1
 swift-ring-builder account.builder rebalance

 .. note::
@@ -79,79 +79,79 @@ For SAIO replication

 #. Add next rows in ``/etc/rsyncd.conf``::

-[account6052]
+[account6252]
 max connections = 25
 path = /srv/1/node/
 read only = false
-lock file = /var/lock/account6052.lock
+lock file = /var/lock/account6252.lock

-[account6062]
+[account6262]
 max connections = 25
 path = /srv/2/node/
 read only = false
-lock file = /var/lock/account6062.lock
+lock file = /var/lock/account6262.lock

-[account6072]
+[account6272]
 max connections = 25
 path = /srv/3/node/
 read only = false
-lock file = /var/lock/account6072.lock
+lock file = /var/lock/account6272.lock

-[account6082]
+[account6282]
 max connections = 25
 path = /srv/4/node/
 read only = false
-lock file = /var/lock/account6082.lock
+lock file = /var/lock/account6282.lock

-[container6051]
+[container6251]
 max connections = 25
 path = /srv/1/node/
 read only = false
-lock file = /var/lock/container6051.lock
+lock file = /var/lock/container6251.lock

-[container6061]
+[container6261]
 max connections = 25
 path = /srv/2/node/
 read only = false
-lock file = /var/lock/container6061.lock
+lock file = /var/lock/container6261.lock

-[container6071]
+[container6271]
 max connections = 25
 path = /srv/3/node/
 read only = false
-lock file = /var/lock/container6071.lock
+lock file = /var/lock/container6271.lock

-[container6081]
+[container6281]
 max connections = 25
 path = /srv/4/node/
 read only = false
-lock file = /var/lock/container6081.lock
+lock file = /var/lock/container6281.lock

-[object6050]
+[object6250]
 max connections = 25
 path = /srv/1/node/
 read only = false
-lock file = /var/lock/object6050.lock
+lock file = /var/lock/object6250.lock

-[object6060]
+[object6260]
 max connections = 25
 path = /srv/2/node/
 read only = false
-lock file = /var/lock/object6060.lock
+lock file = /var/lock/object6260.lock

-[object6070]
+[object6270]
 max connections = 25
 path = /srv/3/node/
 read only = false
-lock file = /var/lock/object6070.lock
+lock file = /var/lock/object6270.lock

-[object6080]
+[object6280]
 max connections = 25
 path = /srv/4/node/
 read only = false
-lock file = /var/lock/object6080.lock
+lock file = /var/lock/object6280.lock

 #. Restart rsync daemon::

@@ -173,7 +173,7 @@ For SAIO replication
 devices = /srv/1/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6050
+bind_port = 6250
 user = swift
 log_facility = LOG_LOCAL2
 recon_cache_path = /var/cache/swift
@@ -197,7 +197,7 @@ For SAIO replication
 devices = /srv/2/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6060
+bind_port = 6260
 user = swift
 log_facility = LOG_LOCAL3
 recon_cache_path = /var/cache/swift2
@@ -221,7 +221,7 @@ For SAIO replication
 devices = /srv/3/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6070
+bind_port = 6270
 user = swift
 log_facility = LOG_LOCAL4
 recon_cache_path = /var/cache/swift3
@@ -245,7 +245,7 @@ For SAIO replication
 devices = /srv/4/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6080
+bind_port = 6280
 user = swift
 log_facility = LOG_LOCAL5
 recon_cache_path = /var/cache/swift4
@@ -271,7 +271,7 @@ For SAIO replication
 devices = /srv/1/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6051
+bind_port = 6251
 user = swift
 log_facility = LOG_LOCAL2
 recon_cache_path = /var/cache/swift
@@ -295,7 +295,7 @@ For SAIO replication
 devices = /srv/2/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6061
+bind_port = 6261
 user = swift
 log_facility = LOG_LOCAL3
 recon_cache_path = /var/cache/swift2
@@ -319,7 +319,7 @@ For SAIO replication
 devices = /srv/3/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6071
+bind_port = 6271
 user = swift
 log_facility = LOG_LOCAL4
 recon_cache_path = /var/cache/swift3
@@ -343,7 +343,7 @@ For SAIO replication
 devices = /srv/4/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6081
+bind_port = 6281
 user = swift
 log_facility = LOG_LOCAL5
 recon_cache_path = /var/cache/swift4
@@ -369,7 +369,7 @@ For SAIO replication
 devices = /srv/1/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6052
+bind_port = 6252
 user = swift
 log_facility = LOG_LOCAL2
 recon_cache_path = /var/cache/swift
@@ -393,7 +393,7 @@ For SAIO replication
 devices = /srv/2/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6062
+bind_port = 6262
 user = swift
 log_facility = LOG_LOCAL3
 recon_cache_path = /var/cache/swift2
@@ -417,7 +417,7 @@ For SAIO replication
 devices = /srv/3/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6072
+bind_port = 6272
 user = swift
 log_facility = LOG_LOCAL4
 recon_cache_path = /var/cache/swift3
@@ -441,7 +441,7 @@ For SAIO replication
 devices = /srv/4/node
 mount_check = false
 disable_fallocate = true
-bind_port = 6082
+bind_port = 6282
 user = swift
 log_facility = LOG_LOCAL5
 recon_cache_path = /var/cache/swift4
@@ -1,7 +1,7 @@
 [DEFAULT]
 devices = /srv/node/
 bind_ip = 127.0.0.1
-bind_port = 6002
+bind_port = 6202
 workers = 2
 mount_check = false
 log_facility = LOG_LOCAL5
@@ -1,7 +1,7 @@
 [DEFAULT]
 devices = /srv/node/
 bind_ip = 127.0.0.1
-bind_port = 6001
+bind_port = 6201
 workers = 2
 mount_check = false
 log_facility = LOG_LOCAL4
@@ -1,7 +1,7 @@
 [DEFAULT]
 devices = /srv/node/
 bind_ip = 127.0.0.1
-bind_port = 6000
+bind_port = 6200
 workers = 2
 mount_check = false
 log_facility = LOG_LOCAL3
@@ -9,12 +9,12 @@ done

 for drive in `ls /srv/node/ | grep 'swift-d'`; do
-echo "swift-ring-builder object.builder add r1z1-127.0.0.1:6000/$drive 1" >> /etc/swift/remakerings.object
-echo "pushed command to add r1z1-127.0.0.1:6000/$drive to /etc/swift/remakerings.object"
-echo "swift-ring-builder container.builder add r1z1-127.0.0.1:6001/$drive 1" >> /etc/swift/remakerings.container
-echo "pushed command to add r1z1-127.0.0.1:6001/$drive to /etc/swift/remakerings.container"
-echo "swift-ring-builder account.builder add r1z1-127.0.0.1:6002/$drive 1" >> /etc/swift/remakerings.account
-echo "pushed command to add r1z1-127.0.0.1:6002/$drive to /etc/swift/remakerings.account"
+echo "swift-ring-builder object.builder add r1z1-127.0.0.1:6200/$drive 1" >> /etc/swift/remakerings.object
+echo "pushed command to add r1z1-127.0.0.1:6200/$drive to /etc/swift/remakerings.object"
+echo "swift-ring-builder container.builder add r1z1-127.0.0.1:6201/$drive 1" >> /etc/swift/remakerings.container
+echo "pushed command to add r1z1-127.0.0.1:6201/$drive to /etc/swift/remakerings.container"
+echo "swift-ring-builder account.builder add r1z1-127.0.0.1:6202/$drive 1" >> /etc/swift/remakerings.account
+echo "pushed command to add r1z1-127.0.0.1:6202/$drive to /etc/swift/remakerings.account"
 done

 for p in $POLICIES; do
@@ -56,26 +56,26 @@ lock file = /var/lock/object.lock
 #
 # So, on your SAIO, you have to set the following rsyncd configuration:
 #
-#[object6010]
+#[object6210]
 #max connections = 25
 #path = /srv/1/node/
 #read only = false
-#lock file = /var/lock/object6010.lock
+#lock file = /var/lock/object6210.lock
 #
-#[object6020]
+#[object6220]
 #max connections = 25
 #path = /srv/2/node/
 #read only = false
-#lock file = /var/lock/object6020.lock
+#lock file = /var/lock/object6220.lock
 #
-#[object6030]
+#[object6230]
 #max connections = 25
 #path = /srv/3/node/
 #read only = false
-#lock file = /var/lock/object6030.lock
+#lock file = /var/lock/object6230.lock
 #
-#[object6040]
+#[object6240]
 #max connections = 25
 #path = /srv/4/node/
 #read only = false
-#lock file = /var/lock/object6040.lock
+#lock file = /var/lock/object6240.lock
@@ -6,7 +6,7 @@
 # Change the debug level as you see fit
 #
 # For example:
-# Replace %PORT% by 6012
+# Replace %PORT% by 6212
 # Replace %SERVICENAME% by account-server-1
 # Replace %USER% with apache (or remove it for default)

@@ -6,7 +6,7 @@
 # Change the debug level as you see fit
 #
 # For example:
-# Replace %PORT% by 6011
+# Replace %PORT% by 6211
 # Replace %SERVICENAME% by container-server-1
 # Replace %USER% with apache (or remove it for default)

@@ -6,7 +6,7 @@
 # Change the debug level as you see fit
 #
 # For example:
-# Replace %PORT% by 6010
+# Replace %PORT% by 6210
 # Replace %SERVICENAME% by object-server-1
 # Replace %USER% with apache (or remove it for default)

@@ -221,7 +221,7 @@ class SwiftRecon(object):
 Compare ring md5sum's with those on remote host

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 :param swift_dir: The local directory with the ring files.
 """
 matches = 0
@@ -275,7 +275,7 @@ class SwiftRecon(object):
 Compare swift.conf md5sum with that on remote hosts

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 :param printfn: function to print text; defaults to print()
 """
 matches = 0
@@ -307,7 +307,7 @@ class SwiftRecon(object):
 Obtain and print async pending statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 scan = {}
 recon = Scout("async", self.verbose, self.suppress_errors,
@@ -329,7 +329,7 @@ class SwiftRecon(object):
 Obtain and print drive audit error statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)]
 """
 scan = {}
 recon = Scout("driveaudit", self.verbose, self.suppress_errors,
@@ -351,7 +351,7 @@ class SwiftRecon(object):
 Check for and print unmounted drives

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 unmounted = {}
 errors = {}
@@ -384,7 +384,7 @@ class SwiftRecon(object):
 Check for server types on the ring

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 errors = {}
 recon = Scout("server_type_check", self.verbose, self.suppress_errors,
@@ -407,7 +407,7 @@ class SwiftRecon(object):
 Obtain and print expirer statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 stats = {'object_expiration_pass': [], 'expired_last_pass': []}
 recon = Scout("expirer/%s" % self.server_type, self.verbose,
@@ -436,7 +436,7 @@ class SwiftRecon(object):
 Obtain and print replication statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 stats = {'replication_time': [], 'failure': [], 'success': [],
 'attempted': []}
@@ -504,7 +504,7 @@ class SwiftRecon(object):
 Obtain and print updater statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 stats = []
 recon = Scout("updater/%s" % self.server_type, self.verbose,
@@ -531,7 +531,7 @@ class SwiftRecon(object):
 Obtain and print obj auditor statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 scan = {}
 adone = '%s_auditor_pass_completed' % self.server_type
@@ -603,7 +603,7 @@ class SwiftRecon(object):
 Obtain and print obj auditor statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 all_scan = {}
 zbf_scan = {}
@@ -679,7 +679,7 @@ class SwiftRecon(object):
 Obtain and print load average statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 load1 = {}
 load5 = {}
@@ -708,7 +708,7 @@ class SwiftRecon(object):
 Obtain and print quarantine statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 objq = {}
 conq = {}
@@ -742,7 +742,7 @@ class SwiftRecon(object):
 Obtain and print /proc/net/sockstat statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 inuse4 = {}
 mem = {}
@@ -776,7 +776,7 @@ class SwiftRecon(object):
 Obtain and print disk usage statistics

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 stats = {}
 highs = []
@@ -875,7 +875,7 @@ class SwiftRecon(object):
 Check a time synchronization of hosts with current time

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 :param jitter: Maximal allowed time jitter
 """

@@ -914,7 +914,7 @@ class SwiftRecon(object):
 Check OS Swift version of hosts. Inform if differs.

 :param hosts: set of hosts to check. in the format of:
-set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
+set([('127.0.0.1', 6220), ('127.0.0.2', 6230)])
 """
 versions = set()
 errors = 0
@@ -52,9 +52,9 @@ of endpoints having the same form as described above, and a key 'headers' that
 maps to a dictionary of headers that should be sent with a request made to
 the endpoints, e.g.::

-{ "endpoints": {"http://10.1.1.1:6010/sda1/2/a/c3/o1",
-"http://10.1.1.1:6030/sda3/2/a/c3/o1",
-"http://10.1.1.1:6040/sda4/2/a/c3/o1"},
+{ "endpoints": {"http://10.1.1.1:6210/sda1/2/a/c3/o1",
+"http://10.1.1.1:6230/sda3/2/a/c3/o1",
+"http://10.1.1.1:6240/sda4/2/a/c3/o1"},
 "headers": {"X-Backend-Storage-Policy-Index": "1"}}

 In this example, the 'headers' dictionary indicates that requests to the
@@ -1437,17 +1437,17 @@ class RingBuilder(object):
 {(): 3.0,
 (1,): 3.0,
 (1, 1): 1.0,
-(1, 1, '127.0.0.1:6010'): 1.0,
-(1, 1, '127.0.0.1:6010', 0): 1.0,
+(1, 1, '127.0.0.1:6210'): 1.0,
+(1, 1, '127.0.0.1:6210', 0): 1.0,
 (1, 2): 1.0,
-(1, 2, '127.0.0.1:6020'): 1.0,
-(1, 2, '127.0.0.1:6020', 1): 1.0,
+(1, 2, '127.0.0.1:6220'): 1.0,
+(1, 2, '127.0.0.1:6220', 1): 1.0,
 (1, 3): 1.0,
-(1, 3, '127.0.0.1:6030'): 1.0,
-(1, 3, '127.0.0.1:6030', 2): 1.0,
+(1, 3, '127.0.0.1:6230'): 1.0,
+(1, 3, '127.0.0.1:6230', 2): 1.0,
 (1, 4): 1.0,
-(1, 4, '127.0.0.1:6040'): 1.0,
-(1, 4, '127.0.0.1:6040', 3): 1.0}
+(1, 4, '127.0.0.1:6240'): 1.0,
+(1, 4, '127.0.0.1:6240', 3): 1.0}

 """
 # Used by walk_tree to know what entries to create for each recursive
@@ -159,7 +159,7 @@ class TestContainerFailures(ReplProbeTest):
 onode = onodes[0]
 db_files = []
 for onode in onodes:
-node_id = (onode['port'] - 6000) // 10
+node_id = (onode['port'] % 100) // 10
 device = onode['device']
 hash_str = hash_path(self.account, container)
 server_conf = readconf(self.configs['container-server'][node_id])
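The probe-test change above swaps the node-id derivation from an offset against 6000 to a modulo of the last two digits, so it yields the same 1-4 ids under both the old 60xx and the new 62xx schemes. A quick worked check:

    # Worked check of the old vs. new node-id derivation used in the probe tests.
    for port in (6010, 6020, 6030, 6040, 6210, 6220, 6230, 6240):
        old_id = (port - 6000) // 10   # 1..4 for 60xx ports, but 21..24 for 62xx
        new_id = (port % 100) // 10    # 1..4 for both schemes
        print(port, old_id, new_id)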
@@ -63,7 +63,7 @@ class TestObjectFailures(ReplProbeTest):
 opart, onodes = self.object_ring.get_nodes(
 self.account, container, obj)
 onode = onodes[0]
-node_id = (onode['port'] - 6000) / 10
+node_id = (onode['port'] % 100) / 10
 device = onode['device']
 hash_str = hash_path(self.account, container, obj)
 obj_server_conf = readconf(self.configs['object-server'][node_id])
@@ -139,13 +139,13 @@ class TestObjectHandoff(ReplProbeTest):
 port_num = node['replication_port']
 except KeyError:
 port_num = node['port']
-node_id = (port_num - 6000) // 10
+node_id = (port_num % 100) // 10
 Manager(['object-replicator']).once(number=node_id)
 try:
 another_port_num = another_onode['replication_port']
 except KeyError:
 another_port_num = another_onode['port']
-another_num = (another_port_num - 6000) // 10
+another_num = (another_port_num % 100) // 10
 Manager(['object-replicator']).once(number=another_num)

 # Assert the first container/obj primary server now has container/obj
|
||||
port_num = node['replication_port']
|
||||
except KeyError:
|
||||
port_num = node['port']
|
||||
node_id = (port_num - 6000) // 10
|
||||
node_id = (port_num % 100) // 10
|
||||
Manager(['object-replicator']).once(number=node_id)
|
||||
another_node_id = (another_port_num - 6000) // 10
|
||||
another_node_id = (another_port_num % 100) // 10
|
||||
Manager(['object-replicator']).once(number=another_node_id)
|
||||
|
||||
# Assert primary node no longer has container/obj
|
||||
|
@ -132,7 +132,7 @@ class TestReconstructorRevert(ECProbeTest):
|
||||
|
||||
# fire up reconstructor on handoff nodes only
|
||||
for hnode in hnodes:
|
||||
hnode_id = (hnode['port'] - 6000) // 10
|
||||
hnode_id = (hnode['port'] % 100) // 10
|
||||
self.reconstructor.once(number=hnode_id)
|
||||
|
||||
# first three primaries have data again
|
||||
|
@ -7,32 +7,32 @@ cd /home/{{ ansible_ssh_user }}/rings
|
||||
rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
|
||||
|
||||
swift-ring-builder object.builder create 10 3 1
|
||||
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
|
||||
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
|
||||
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
|
||||
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb1 1
|
||||
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb2 1
|
||||
swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb3 1
|
||||
swift-ring-builder object.builder rebalance
|
||||
swift-ring-builder object-1.builder create 10 2 1
|
||||
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
|
||||
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
|
||||
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
|
||||
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb1 1
|
||||
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb2 1
|
||||
swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb3 1
|
||||
swift-ring-builder object-1.builder rebalance
|
||||
swift-ring-builder object-2.builder create 10 6 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb4 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb5 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb6 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb7 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb8 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb1 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb2 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb3 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb4 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb5 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb6 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb7 1
|
||||
swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6210/sdb8 1
|
||||
swift-ring-builder object-2.builder rebalance
|
||||
swift-ring-builder container.builder create 10 3 1
|
||||
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb1 1
|
||||
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb2 1
|
||||
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb3 1
|
||||
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6211/sdb1 1
|
||||
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6211/sdb2 1
|
||||
swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6211/sdb3 1
|
||||
swift-ring-builder container.builder rebalance
|
||||
swift-ring-builder account.builder create 10 3 1
|
||||
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb1 1
|
||||
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb2 1
|
||||
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb3 1
|
||||
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6212/sdb1 1
|
||||
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6212/sdb2 1
|
||||
swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6212/sdb3 1
|
||||
swift-ring-builder account.builder rebalance
|
||||
|