Centos建议最低配置:
控制节点:1个处理器,4 GB内存和5 GB存储
计算节点:1个处理器,2 GB内存和10 GB存储

一,准备工作
所有节点配置域名解析
vim /etc/hosts
192.168.1.135 controller
192.168.1.136 compute
192.168.1.137 cinder

关闭firewalld(测试很重要)
systemctl stop firewalld
systemctl disable firewalld
更改selinux状态(测试很重要)
vi /etc/sysconfig/selinux
SELINUX=disabled

更改主机名,避免发现计算节点主机时,因主机名都为localhost而产生不必要的冲突
控制节点
hostnamectl set-hostname controller
计算节点
hostnamectl set-hostname compute
cinder存储节点
hostnamectl set-hostname cinder

#yum -y install chrony #所有节点都安装
#vim /etc/chrony.conf #controller节点的不怎么用配置,compute节点只留server controller iburst,其他注释掉
#systemctl enable chronyd.service
#systemctl restart chronyd.service
#chronyc sources

安装openstack-ocata版本
#yum -y install centos-release-openstack-ocata.noarch
#yum -y install https://rdoproject.org/repos/rdo-release.rpm
#yum -y upgrade
#yum -y install python-openstackclient #安装opentack必须的插件
#yum -y install yum-plugin-priorities #防止软件自动更新,OpenStack组件装完后,某个软件自动更新的话,可能导致整个服务瘫痪

部署mariadb 控制节点安装
yum -y install mariadb mariadb-server python2-PyMySQL

vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address=192.168.1.135
default-storage-engine=innodb
max_connections = 4096
innodb_file_per_table
collation-server=utf8_general_ci
character-set-server=utf8

#systemctl enable mariadb.service
#systemctl start mariadb.service
echo -e "\nY\n123456\n123456\nY\nn\nY\nY\n" | mysql_secure_installation #自动设置mysql密码为123456,跳过手动步骤

部署消息队列rabbitmq 控制节点安装
#yum -y install rabbitmq-server
#systemctl enable rabbitmq-server.service
#systemctl start rabbitmq-server.service
#rabbitmqctl add_user openstack openstack-password 增加用户openstack,密码openstack-password
#rabbitmqctl set_permissions openstack ".*" ".*" ".*" 给openstack用户授权,没有授权的用户将不能接受和传递消息
#rabbitmqctl set_user_tags openstack administrator 赋予其administrator角色

#systemctl restart rabbitmq-server.service
查看端口是否存在
#netstat -nltp |grep 5672

部署memcache 控制节点安装
#yum -y install memcached python-memcached

修改配置文件中现有的OPTIONS选项
vim /etc/sysconfig/memcached
OPTIONS="-l 127.0.0.1,::1,controller"

#systemctl enable memcached.service
#systemctl start memcached.service

二:认证服务
2.1安装配置 控制节点安装
创建keystone数据库
#mysql -uroot -p123456
MariaDB [(none)]> CREATE DATABASE keystone;
授权
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY 'keystone-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone-password';
MariaDB [(none)]> flush privileges;

安装keystone及相关组件
#yum -y install openstack-keystone httpd mod_wsgi
  
#vim /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:[email protected]/keystone
[token]
provider = fernet

同步数据库
#su -s /bin/sh -c "keystone-manage db_sync" keystone
同步完建议进入数据库,查看数据库keystone是否生成表成功,笔者这里用的centos7.2系统,经过多次观察,一般为44个表
mysql -uroot -p123456
MariaDB [(none)]> use keystone;
MariaDB [(keystone)]> show tables;

初始化
#keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
引导身份服务:
#keystone-manage bootstrap --bootstrap-password admin-password \
--bootstrap-admin-url http://controller:35357/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne

修改httpd配置文件:
#vim /etc/httpd/conf/httpd.conf
ServerAdmin [email protected]
ServerName controller:80 (将ServerName 后面改成主机名,防止启动报错)

在wsgi-keystone配置文件中加入以下内容
#vim /usr/share/keystone/wsgi-keystone.conf

openstack ocata安装步骤

创建一个指向/usr/share/keystone/wsgi-keystone.conf文件的链接:
#ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

启动httpd:
#systemctl enable httpd.service
#systemctl start httpd.service
#netstat -ntpl | grep httpd

创建脚本来设置变量
#vim admin-openrc

export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin-password
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

#vim demo-openrc

export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

运行环境变量:
#. admin-openrc
创建service项目
#openstack project create --domain default --description "Service Project" service
创建demo项目
#openstack project create --domain default --description "Demo Project" demo
创建demo用户并设置密码 #笔者这里设置的123456,可自行设置
#openstack user create --domain default --password-prompt demo
创建角色user
#openstack role create user
将用户demo以user的角色添加到demo项目
#openstack role add --project demo --user demo user

重置环境变量:
unset OS_AUTH_URL OS_PASSWORD

admin 用户,请求身份验证令牌:
#openstack --os-auth-url http://controller:35357/v3 \
--os-project-domain-name default --os-user-domain-name default \
--os-project-name admin --os-username admin token issue

demo用户,请求身份验证令牌:
#openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name default --os-user-domain-name default \
--os-project-name demo --os-username demo token issue

三glance镜像服务
创建glance数据库
#mysql -uroot -p123456
MariaDB [(none)]> CREATE DATABASE glance;(创建 glance 数据库)
 授权
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY 'glance-password'; (对glance数据库授予恰当的权限)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance-password'; (对glance数据库授予恰当的权限)
MariaDB [(none)]> flush privileges;

运行环境变量:
#. admin-openrc
创建glance用户,密码为glance-password,可自行设置密码
#openstack user create --domain default --password-prompt glance
#openstack role add --project service --user glance admin

创建glance服务实体:
#openstack service create --name glance --description "OpenStack Image" image
创建glance服务的 API 端点:
#openstack endpoint create --region RegionOne image public http://controller:9292
#openstack endpoint create --region RegionOne image internal http://controller:9292
#openstack endpoint create --region RegionOne image admin http://controller:9292

安装glance服务
#yum -y install openstack-glance
修改glance配置文件
#vim /etc/glance/glance-api.conf

[database]  
connection = mysql+pymysql://glance:[email protected]/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance-password
  
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images

#vim /etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:[email protected]/glance

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance-password

[paste_deploy]
flavor = keystone

同步glance数据库:
#su -s /bin/sh -c "glance-manage db_sync" glance

#systemctl enable openstack-glance-api.service openstack-glance-registry.service
#systemctl start openstack-glance-api.service openstack-glance-registry.service

验证glance是否正常
#. admin-openrc
下载一个测试用的镜像:
#wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
上传下载的镜像:
#openstack image create "cirros" \
--file cirros-0.3.5-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--public

#openstack image list

四。计算服务
以下配置步骤在控制节点实行
创建nova的数据库:
#mysql -uroot -p123456
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> CREATE DATABASE nova_api;
MariaDB [(none)]> CREATE DATABASE nova_cell0;

授权:
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'controller' IDENTIFIED BY 'nova-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY 'nova-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'controller' IDENTIFIED BY 'nova-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova-password';
MariaDB [(none)]> flush privileges;

运行环境变量:
#. admin-openrc
创建nova用户,密码为nova-password
#openstack user create --domain default --password-prompt nova

#openstack role add --project service --user nova admin
#openstack service create --name nova --description "OpenStack Compute" compute
创建计算服务 API 端点:
#openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
#openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
#openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1

创建placement用户,密码为placement-password
#openstack user create --domain default --password-prompt placement
#openstack role add --project service --user placement admin
#openstack service create --name placement --description "Placement API" placement
#openstack endpoint create --region RegionOne placement public http://controller:8778
#openstack endpoint create --region RegionOne placement internal http://controller:8778
#openstack endpoint create --region RegionOne placement admin http://controller:8778

安装nova服务
#yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
修改nova配置文件
#vim /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
my_ip = 192.168.1.135
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
transport_url = rabbit://openstack:[email protected]

[api_database]
connection = mysql+pymysql://nova:[email protected]/nova_api

[database]
connection = mysql+pymysql://nova:[email protected]/nova

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova-password

[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
os_region_name = RegionOne
project_domain_name = default
project_name = service
auth_type = password
user_domain_name = default
auth_url = http://controller:35357/v3
username = placement
password = placement-password

在placement-api配置文件中添加
#vim /etc/httpd/conf.d/00-nova-placement-api.conf
openstack ocata安装步骤

重启httpd 服务:
#systemctl restart httpd

同步数据库:
#su -s /bin/sh -c "nova-manage api_db sync" nova
#su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
#su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
#su -s /bin/sh -c "nova-manage db sync" nova

验证nova cell0和cell1是否正确注册:
nova-manage cell_v2 list_cells
±------±-------------------------------------±-----------------------------------±------------------------------------------------±---------+
| Name | UUID | Transport URL | Database Connection | Disabled |
±------±-------------------------------------±-----------------------------------±------------------------------------------------±---------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:@controller/nova_cell0 | False |
| cell1 | 40aa6629-45c3-4b2d-953a-3e627733380e | rabbit://openstack:
@controller | mysql+pymysql://nova:****@controller/nova | False |
±------±-------------------------------------±-----------------------------------±------------------------------------------------±---------+

配置nova服务开机启动,并重启服务
#systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service

#systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

以下配置步骤在计算节点进行
#yum -y install openstack-nova-compute
#vim /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:[email protected]
my_ip = 192.168.1.136
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova-password

[vnc]
enabled = True
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
os_region_name = RegionOne
project_domain_name = default
project_name = service
auth_type = password
user_domain_name = default
auth_url = http://controller:35357/v3
username = placement
password = placement-password

(确定计算节点是否支持虚拟机的硬件加速)
#egrep -c ‘(vmx|svm)’ /proc/cpuinfo
如果为0则需要修改
#vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu

启动计算服务及其依赖,并将其配置为随系统自动启动:
#systemctl enable libvirtd.service openstack-nova-compute.service
#systemctl start libvirtd.service openstack-nova-compute.service

在控制节点上执行
#. admin-openrc
#openstack hypervisor list
发现计算节点
#su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
vim /etc/nova/nova.conf
  [scheduler]
  discover_hosts_in_cells_interval = 300

在控制节点验证:
#openstack compute service list

五:Networking服务
安装并配置控制节点
创建neutron数据库
mysql -uroot -p123456
MariaDB [(none)]> CREATE DATABASE neutron;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'controller' IDENTIFIED BY 'neutron-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron-password';
MariaDB [(none)]> flush privileges;
运行环境变量:
#. admin-openrc
创建neutron用户,密码为neutron-password
#openstack user create --domain default --password-prompt neutron
#openstack role add --project service --user neutron admin
#openstack service create --name neutron --description "OpenStack Networking" network
创建neutron网络服务API端点
#openstack endpoint create --region RegionOne network public http://controller:9696
#openstack endpoint create --region RegionOne network internal http://controller:9696
#openstack endpoint create --region RegionOne network admin http://controller:9696

安装neutron服务
#yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
  
#vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[database]
connection = mysql+pymysql://neutron:[email protected]/neutron

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username=neutron
password=neutron-password

[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova-password

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

配置ml2扩展:
#vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security

[ml2_type_flat]
flat_networks = provider

[ml2_type_vxlan]
vni_ranges = 1:1000

[securitygroup]
enable_ipset = true

配置网桥:
#vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]
physical_interface_mappings = provider:eno33554960 #笔者这里网卡名称填的是192.168.1.135地址对应的网卡eno33554960
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[vxlan]
enable_vxlan = true
local_ip = 192.168.1.135
l2_population = true

配置3层网络:
#vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge

配置dhcp:
#vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

配置元数据代理
#vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET

修改配置控制节点的nova.conf文件
#vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron-password
service_metadata_proxy = True
metadata_proxy_shared_secret = METADATA_SECRET #元数据代理密码

创建扩展连接:
#ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
同步数据库
#su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

重启服务:
#systemctl restart openstack-nova-api.service
#systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
#systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

#systemctl enable neutron-l3-agent.service
#systemctl start neutron-l3-agent.service

安装并配置计算节点
#yum -y install openstack-neutron-linuxbridge ebtables ipset

#vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron-password

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

配置vxlan
#vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]
physical_interface_mappings = provider:eno33554960 ##笔者这里网卡名称填的是192.168.1.136地址对应的网卡eno33554960

[vxlan]
enable_vxlan = true
local_ip = 192.168.1.136
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

修改配置计算节点的nova.conf文件
#vim /etc/nova/nova.conf

[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron-password

重启计算服务
#systemctl restart openstack-nova-compute.service
#systemctl enable neutron-linuxbridge-agent.service
#systemctl start neutron-linuxbridge-agent.service

验证(控制节点执行)
#. admin-openrc
#openstack extension list --network
#openstack network agent list
±-------------------------------------±-------------------±-----------±------------------±------±------±--------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
±-------------------------------------±-------------------±-----------±------------------±------±------±--------------------------+
| 14c0e433-2c4b-4c3f-827e-b2923f9865cc | L3 agent | controller | nova | ???? | UP | neutron-l3-agent |
| 2cae088a-d493-45ff-8845-406d0bf33b6d | Metadata agent | controller | None | ???? | UP | neutron-metadata-agent |
| 3a2b193a-b6e7-43c4-97e2-6a47508ce707 | DHCP agent | controller | nova | ???? | UP | neutron-dhcp-agent |
| 8f94ade1-fe3b-49dd-a117-a7bd01de172d | Linux bridge agent | controller | None | ???? | UP | neutron-linuxbridge-agent |
| b49588ce-f3a9-40cd-bb69-9cba054be2b7 | Linux bridge agent | compute | None | ???? | UP | neutron-linuxbridge-agent |
±-------------------------------------±-------------------±-----------±------------------±------±------±--------------------------+

六:Dashboard 控制节点安装
#yum -y install openstack-dashboard
修改配置文件
#vim /etc/openstack-dashboard/local_settings

OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*'] #这里为了测试所以配置为允许所有ip
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"

TIME_ZONE = "UTC" (上海 CST)

为了页面访问dashboard,修改该文件
#vim /etc/httpd/conf.d/openstack-dashboard.conf
在WSGISocketPrefix run/wsgi下面加一行代码:
WSGIApplicationGroup %{GLOBAL}

重启服务
#systemctl restart httpd.service memcached.service

测试登录
在网页上输入网址http://192.168.1.135/dashboard/auth/login
域:default
用户名:admin 密码:admin-password

用户名:demo 密码:123456

七,部署存储节点Cinder
以下步骤在控制节点执行
创建cinder数据库
#mysql -u root -p123456
MariaDB [(none)]> CREATE DATABASE cinder;
授权
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'controller' IDENTIFIED BY 'cinder-password';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder-password';
MariaDB [(none)]> flush privileges;

运行环境变量
. admin-openrc
创建cinder用户以及服务端点
openstack user create --domain default --password-prompt cinder #(cinder-password)
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s

安装cinder服务
#yum -y install openstack-cinder
#vim /etc/cinder/cinder.conf

[database]
connection = mysql+pymysql://cinder:[email protected]/cinder

[DEFAULT]
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
my_ip = 192.168.1.135

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder-password

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

同步数据库
#su -s /bin/sh -c "cinder-manage db sync" cinder

修改控制节点的nova.conf文件
#vim /etc/nova/nova.conf
[cinder]
os_region_name=RegionOne

重启服务
#systemctl restart openstack-nova-api.service
#systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
#systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service

以下步骤在cinder节点执行
#yum -y install lvm2 #一般centos系统自带该服务
#systemctl enable lvm2-lvmetad.service
#systemctl start lvm2-lvmetad.service

笔者这里使用fdisk /dev/sda 命令,新增了一个sda4的分区,并用mkfs.xfs格式化了sda4。最后再用pvcreate创建的。
#fdisk -l
#Start End Size Type Name
1 2048 4095 1M BIOS boot
2 4096 1028095 500M Microsoft basic
3 1028096 4101912575 1.9T Linux LVM
4 4101912576 6800000000 1.3T Linux filesyste

#pvcreate /dev/sda4
(Physical volume “/dev/sda4” successfully created)
#vgcreate cinder-volumes /dev/sda4
(Volume group “cinder-volumes” successfully created)

#vim /etc/lvm/lvm.conf
devices {
filter = [ "a/sda4/", "a/sda3/", "r/.*/" ] #因fdisk -l 查看时发现sda3的类型是LVM,所以这里也将sda3加进过滤器
}

安装cinder服务
yum -y install openstack-cinder targetcli python-keystone
修改配置文件
#vim /etc/cinder/cinder.conf

[DEFAULT]
transport_url = rabbit://openstack:[email protected]
auth_strategy = keystone
my_ip = 192.168.1.137
enabled_backends = lvm
glance_api_servers = http://controller:9292

[database]
connection = mysql+pymysql://cinder:[email protected]/cinder

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder-password

(如果没有lvm选项。直接添加一个)
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

#systemctl enable openstack-cinder-volume.service target.service
#systemctl start openstack-cinder-volume.service target.service

控制节点验证
#. admin-openrc
#openstack volume service list

±-----------------±-----------±-----±--------±------±---------------------------+
| Binary | Host | Zone | Status | State | Updated_at |
±-----------------±-----------±-----±--------±------±---------------------------+
| cinder-scheduler | controller | nova | enabled | up | 2019-04-3T02:27:41.000000 |
| cinder-volume | [email protected] | nova | enabled | up | 2019-04-3T02:27:46.000000 |
±-----------------±-----------±-----±--------±------±---------------------------+

.
.
.
.
.
.
.
.
.
.

.

补充:
如需要重启所有服务,可按以下步骤

systemctl restart memcached httpd

glance:服务
controller节点 systemctl restart openstack-glance-api.service openstack-glance-registry.service

nova服务
controller节点 systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
compute节点 systemctl restart libvirtd.service openstack-nova-compute.service

network服务
controller节点 systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
compute节点 systemctl restart openstack-nova-compute.service neutron-linuxbridge-agent.service

cinder服务
controller节点 systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
cinder节点 systemctl restart openstack-cinder-volume.service target.service

命令行创建外部网络
. admin-openrc

openstack network create --share --external \
--provider-physical-network provider \
--provider-network-type flat provider

相关文章: