设为首页 收藏本站
查看: 1150|回复: 0

[经验分享] Install and Configure OpenStack Object Storage (Swift)

[复制链接]
发表于 2018-6-2 10:44:06 | 显示全部楼层 |阅读模式
  Based on OpenStack Icehouse release
DSC0000.jpg

DSC0001.jpg

  OpenStack Swift Architecture
https://swiftstack.com/openstack-swift/architecture/

DSC0002.jpg

  On keystone auth node:

  # add swift user
keystone user-create --tenant service --name swift --pass SWIFT-USER-PASSWORD

# add swift user in admin role
keystone user-role-add --user swift --tenant service --role admin

# add an entry for swift service
keystone service-create --name=swift --type=object-store --description="Swift Service"

# add an entry for swift endpoint
keystone endpoint-create --region RegionOne --service swift --publicurl=http://LOAD-BALANCER-OF-PROXY:8080/v1/AUTH_%\(tenant_id\)s --internalurl=http://LOAD-BALANCER-OF-PROXY:8080/v1/AUTH_%\(tenant_id\)s --adminurl=http://LOAD-BALANCER-OF-PROXY:8080
  

  On Swift Storage nodes:

  1. service NetworkManager stop; chkconfig NetworkManager off
service network start; chkconfig network on

disable firewall and selinux
service iptables stop; chkconfig iptables off
service ip6tables stop; chkconfig ip6tables off

2. using eth0 to connect proxy servers and other storage nodes

  
3. set hostname in /etc/sysconfig/network and /etc/hosts
192.168.20.30    proxynode1
  192.168.20.31    proxynode2
  
4. yum -y install ntp
vi /etc/ntp.conf
server 192.168.20.30 prefer

  server 192.168.20.31
restrict 192.168.20.30
  restrict 192.168.20.31

service ntpd start; chkconfig ntpd on

5. yum -y install  http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-3.noarch.rpm
  yum -y install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum -y install mysql MySQL-python
  

  6. yum -y install openstack-swift-account openstack-swift-container openstack-swift-object xfsprogs xinetd

7. RAID on the storage drives is not required and not recommended, use a single partition per drive

swift storage node1:
single partition, using fdisk, /dev/sdb --> /dev/sdb1 --> /srv/node/sdb1
  /dev/sdc --> /dev/sdc1 --> /srv/node/sdc1
  partprobe /dev/sdb /dev/sdc

  mkfs.xfs -i size=1024 /dev/sdb1
  mkfs.xfs -i size=1024 /dev/sdc1
  mkdir -p /srv/node/sdb1; mkdir -p /srv/node/sdc1
echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
echo "/dev/sdc1 /srv/node/sdc1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
mount /srv/node/sdb1; mount /srv/node/sdc1
chown -R swift:swift /srv/node
  

  vi /etc/swift/swift.conf
# change (it is shared between Nodes - any words you like)
[swift-hash]
swift_hash_path_suffix = swift_shared_path
  

  vi /etc/swift/account-server.conf
bind_ip =192.168.20.40

vi /etc/swift/container-server.conf
bind_ip =192.168.20.40

vi /etc/swift/object-server.conf
bind_ip =192.168.20.40
  
vi /etc/rsyncd.conf
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = STORAGE_LOCAL_NET_IP # 192.168.20.40

[account]
max connections = 25
path = /srv/node/
read only = false
lock file = /var/lock/account.lock

[container]
max connections = 25
path = /srv/node/
read only = false
lock file = /var/lock/container.lock

[object]
max connections = 25
path = /srv/node/
read only = false
lock file = /var/lock/object.lock

vi /etc/xinetd.d/rsync
disable = no

service xinetd start; chkconfig xinetd on

mkdir -p /var/swift/recon; chown -R swift:swift /var/swift/recon
  

  swift storage node2:
single partition, using fdisk, /dev/sdb --> /dev/sdb1 --> /srv/node/sdb1

  /dev/sdc --> /dev/sdc1 --> /srv/node/sdc1
  partprobe /dev/sdb /dev/sdc
  mkfs.xfs -i size=1024 /dev/sdb1
  mkfs.xfs -i size=1024 /dev/sdc1

  mkdir -p /srv/node/sdb1; mkdir -p /srv/node/sdc1
echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
echo "/dev/sdc1 /srv/node/sdc1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
mount /srv/node/sdb1; mount /srv/node/sdc1
chown -R swift:swift /srv/node
  

  vi /etc/swift/swift.conf
# change (it is shared between Nodes - any words you like)
[swift-hash]
swift_hash_path_suffix = swift_shared_path
  

  vi /etc/swift/account-server.conf
bind_ip =192.168.20.41

vi /etc/swift/container-server.conf
bind_ip =192.168.20.41

vi /etc/swift/object-server.conf
bind_ip =192.168.20.41

  
vi /etc/rsyncd.conf
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = STORAGE_LOCAL_NET_IP # 192.168.20.41
  

  [account]
max connections = 25
path = /srv/node/
read only = false
lock file = /var/lock/account.lock

[container]
max connections = 25
path = /srv/node/
read only = false
lock file = /var/lock/container.lock

[object]
max connections = 25
path = /srv/node/
read only = false
lock file = /var/lock/object.lock

vi /etc/xinetd.d/rsync
disable = no

service xinetd start; chkconfig xinetd on

mkdir -p /var/swift/recon; chown -R swift:swift /var/swift/recon
  

  Swift Proxy Load balancer:
  1. yum -y install Pound

2. vi /etc/pound.cfg

User "pound"
Group "pound"
Control "/var/lib/pound/pound.cfg"

ListenHTTP
    Address LOAD-BALANCER-NAME|IP-OF-LOAD-BALANCER
    Port 8080
      xHTTP 2
End

#ListenHTTPS
#    Address 0.0.0.0
#    Port    443
#    Cert    "/etc/pki/tls/certs/pound.pem"
#End

Service
    BackEnd
        Address 192.168.1.30
        Port    8080
    End

    BackEnd
        Address 192.168.1.31
        Port    8080
    End
End

service pound start; chkconfig pound on
  

  On First Swift Proxy:
  1. service NetworkManager stop; chkconfig NetworkManager off
service network start; chkconfig network on

disable firewall and selinux
service iptables stop; chkconfig iptables off
service ip6tables stop; chkconfig ip6tables off

2. eth0 for management/public/floating (192.168.1.0/24), eth1 for internal/flat (192.168.20.0/24), it's recommended to use a separate NIC for the management network
  
3. set hostname in /etc/sysconfig/network and /etc/hosts
192.168.1.10    controller
192.168.1.11    node1
192.168.1.30    proxynode1
  192.168.1.31    proxynode2
  
4. yum -y install ntp
vi /etc/ntp.conf
server 192.168.1.10
restrict 192.168.1.10
  restrict 192.168.20.0 mask 255.255.255.0 nomodify notrap
  
service ntpd start; chkconfig ntpd on

5. yum -y install  http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-3.noarch.rpm
  yum -y install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum -y install mysql MySQL-python
  

  6. yum -y install openstack-swift-proxy memcached python-swiftclient python-keystone-auth-token
  

  vi /etc/sysconfig/memcached
  OPTIONS="-l 192.168.20.30"
  

  service memcached start; chkconfig memcached on

7. vi /etc/swift/proxy-server.conf

  [filter:cache]
use = egg:swift#memcache
memcache_servers = 192.168.20.30:11211
  

  [filter:authtoken]
  admin_tenant_name = service
admin_user = swift
admin_password = SWIFT-USER-PASSWORD
auth_host = controller
auth_port = 35357
auth_protocol = http
  signing_dir = /tmp/keystone-signing-swift

vi /etc/swift/swift.conf
# change (it is shared between Nodes - any words you like)
[swift-hash]
swift_hash_path_suffix = swift_shared_path

  

  mkdir /tmp/keystone-signing-swift
  chown -R swift:swift /tmp/keystone-signing-swift

  

  8. please check http://rackerlabs.github.io/swift-ppc/, you should have at least 3 disks for the ring (my thought :))

  cd /etc/swift
swift-ring-builder account.builder create 8 3 1
swift-ring-builder container.builder create 8 3 1
swift-ring-builder object.builder create 8 3 1
  you should run above commands only once, so take care of the part_power value

  

  Notes: check https://swiftstack.com/blog/2012/04/09/swift-capacity-management/ for more info on weight, you can use disk capacity in GB for the weight value during first initialization

  

  swift-ring-builder account.builder add r1z1-192.168.20.40:6002/sdb1 25
swift-ring-builder container.builder add r1z1-192.168.20.40:6001/sdb1 25
swift-ring-builder object.builder add r1z1-192.168.20.40:6000/sdb1 25

swift-ring-builder account.builder add r1z1-192.168.20.40:6002/sdc1 25
swift-ring-builder container.builder add r1z1-192.168.20.40:6001/sdc1 25
swift-ring-builder object.builder add r1z1-192.168.20.40:6000/sdc1 25

swift-ring-builder account.builder add r1z2-192.168.20.41:6002/sdb1 25
swift-ring-builder container.builder add r1z2-192.168.20.41:6001/sdb1 25
swift-ring-builder object.builder add r1z2-192.168.20.41:6000/sdb1 25

swift-ring-builder account.builder add r1z2-192.168.20.41:6002/sdc1 25
swift-ring-builder container.builder add r1z2-192.168.20.41:6001/sdc1 25
swift-ring-builder object.builder add r1z2-192.168.20.41:6000/sdc1 25

  

  Verify the ring contents for each ring (cd /etc/swift to run below commands):
swift-ring-builder account.builder
swift-ring-builder container.builder
swift-ring-builder object.builder
  
swift-ring-builder account.builder rebalance
swift-ring-builder container.builder rebalance
swift-ring-builder object.builder rebalance

chown -R swift:swift /etc/swift

scp /etc/swift/*.gz 192.168.20.40:/etc/swift/
scp /etc/swift/*.gz 192.168.20.41:/etc/swift/

service openstack-swift-proxy start; chkconfig openstack-swift-proxy on

  

  on swift storage node1 and node2:
  1. chown -R swift:swift /etc/swift
  

  2. for service in openstack-swift-object openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object-auditor openstack-swift-container openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container-auditor openstack-swift-account openstack-swift-account-replicator openstack-swift-account-reaper openstack-swift-account-auditor; do
service $service start
  chkconfig $service on
done
  

  On keystone auth node:
  swift stat
  

  Second Swift Proxy Server:

1. service NetworkManager stop; chkconfig NetworkManager off
service network start; chkconfig network on

disable firewall and selinux
service iptables stop; chkconfig iptables off
service ip6tables stop; chkconfig ip6tables off

2. eth0 for management/public/floating (192.168.1.0/24), eth1 for internal/flat (192.168.20.0/24), it's recommended to use a separate NIC for the management network
  

  3. set hostname in /etc/sysconfig/network and /etc/hosts
192.168.1.10    controller
192.168.1.11    node1
192.168.1.30    proxynode1
  192.168.1.31    proxynode2
  

  4. yum -y install ntp
vi /etc/ntp.conf
server 192.168.1.10
restrict 192.168.1.10
  restrict 192.168.20.0 mask 255.255.255.0 nomodify notrap
  
service ntpd start; chkconfig ntpd on

5. yum -y install http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-3.noarch.rpm
  yum -y install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum -y install mysql MySQL-python

6. yum -y install openstack-swift-proxy memcached python-swiftclient python-keystone-auth-token

vi /etc/sysconfig/memcached
OPTIONS="-l 192.168.20.31"

service memcached start; chkconfig memcached on

7. vi /etc/swift/proxy-server.conf
[filter:cache]
use = egg:swift#memcache
memcache_servers = 192.168.20.30:11211,192.168.20.31:11211

[filter:authtoken]
admin_tenant_name = service
admin_user = swift
admin_password = SWIFT-USER-PASSWORD
auth_host = controller
auth_port = 35357
auth_protocol = http
signing_dir = /tmp/keystone-signing-swift

vi /etc/swift/swift.conf
# change (it is shared between Nodes - any words you like)
[swift-hash]
swift_hash_path_suffix = swift_shared_path

mkdir /tmp/keystone-signing-swift
chown -R swift:swift /tmp/keystone-signing-swift

8. scp 192.168.1.30:/etc/swift/*.gz /etc/swift
chown -R swift:swift /etc/swift

9. service openstack-swift-proxy start; chkconfig openstack-swift-proxy on
  

  on first swift proxy server:

vi /etc/swift/proxy-server.conf
[filter:cache]
use = egg:swift#memcache
memcache_servers = 192.168.20.30:11211,192.168.20.31:11211

service openstack-swift-proxy restart
  

  To verify installation:
  on controller node
  source ~/adminrc
  swift stat

  

运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.yunweiku.com/thread-506162-1-1.html 上篇帖子: Install and Configure OpenStack Block Storage (Cinder) 下篇帖子: 简化 Openstack Havana 版 compute 节点部署程序
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表