OpenStack + Skyline + Ceph ALL-IN-ONE Deployment
Ubuntu version: 22.04 LTS
1 - Cluster Initialization and Environment Preparation
root@ubuntu:~# cat /etc/netplan/00-installer-config.yaml
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: no
      addresses:
        - 192.168.0.88/24
      routes:
        - to: default
          via: 192.168.0.2
      nameservers:
        addresses: [223.5.5.5,114.114.114.114]
    ens34:
      dhcp4: true
  version: 2
root@ubuntu:~# netplan apply
WARNING:root:Cannot call Open vSwitch: ovsdb-server.service is not running.
# Configure the network address
root@ubuntu:~# grep -Ev '#|^$' /etc/hosts
127.0.0.1 localhost
127.0.1.1 ubuntu
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.0.88 controller
192.168.0.88 compute
192.168.0.88 cinder
192.168.0.88 swift
192.168.0.88 ceph
# Configure name resolution
root@ubuntu:~# apt install chrony
root@ubuntu:~# timedatectl set-timezone "Asia/Shanghai"
root@ubuntu:~# date
Wed May 15 09:48:28 AM CST 2024
root@ubuntu:~# systemctl enable --now chrony
Synchronizing state of chrony.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable chrony
# Configure time synchronization
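# A quick optional sanity check: chrony should list reachable time sources once it is running
root@ubuntu:~# chronyc sources -v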
2 - Install Base Software
root@ubuntu:~# add-apt-repository cloud-archive:zed
# Enable the Ubuntu Cloud Archive for OpenStack Zed on Ubuntu 22.04 LTS
root@ubuntu:~# apt install nova-compute -y
root@ubuntu:~# apt install python3-openstackclient -y
# Example package installation
Database installation
root@ubuntu:~# apt install mariadb-server python3-pymysql -y
root@ubuntu:~# cat > /etc/mysql/mariadb.conf.d/99-openstack.cnf <<EOF
[mysqld]
bind-address = 192.168.0.88
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF
# Configure MySQL
root@ubuntu:~# systemctl enable --now mysql
root@ubuntu:~# systemctl restart mysql
# Enable the service at boot and restart it
root@ubuntu:~# mysql_secure_installation
n
y
000000
000000
n
y
y
# Complete the installation
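# A minimal check, assuming the root password 000000 chosen above: log in non-interactively and print the server version
root@ubuntu:~# mysql -uroot -p000000 -e "SELECT VERSION();"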
Message queue installation
root@ubuntu:~# apt install rabbitmq-server -y
root@ubuntu:~# rabbitmqctl add_user openstack 000000
Adding user "openstack" ...
Done. Don't forget to grant the user permissions to some virtual hosts! See 'rabbitmqctl help set_permissions' to learn more.
root@ubuntu:~# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/" ...
# Add the openstack user and grant permissions
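# Optional sanity check: the new user and its permissions on the default vhost should now be listed
root@ubuntu:~# rabbitmqctl list_users
root@ubuntu:~# rabbitmqctl list_permissions -p /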
Memcached installation
root@ubuntu:~# apt install memcached python3-memcache -y
root@ubuntu:~# sed -i "s/-l 127.0.0.1/-l 192.168.0.88/g" /etc/memcached.conf
root@ubuntu:~# systemctl enable --now memcached
Synchronizing state of memcached.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable memcached
root@ubuntu:~# systemctl restart memcached
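# Optional check that memcached now listens on the management address rather than 127.0.0.1
root@ubuntu:~# ss -tnlp | grep 11211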
etcd installation
root@ubuntu:~# apt install etcd -y
root@ubuntu:~# mkdir /opt/bak
root@ubuntu:~# cp /etc/default/etcd /opt/bak/etcd.bak
# Back up the file
root@ubuntu:~# cat > /etc/default/etcd <<EOF
ETCD_NAME="controller"
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER="controller=http://192.168.0.88:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.0.88:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.0.88:2379"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.0.88:2379"
EOF
root@ubuntu:~# systemctl enable --now etcd
Synchronizing state of etcd.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable etcd
root@ubuntu:~# systemctl restart etcd
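# Optional check that etcd answers on the advertised client URL (etcdctl should be available via the etcd-client package pulled in above)
root@ubuntu:~# ETCDCTL_API=3 etcdctl --endpoints=http://192.168.0.88:2379 member list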
3 - OpenStack Deployment
Keystone deployment
root@ubuntu:~# mysql -u root -p000000 -e "show databases;"
+--------------------+
| Database |
+--------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
+--------------------+
root@ubuntu:~# mysql -u root -p000000 -e "CREATE DATABASE keystone;"
root@ubuntu:~# mysql -u root -p000000 -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -u root -p000000 -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -u root -p000000 -e "use mysql; select user,host from user;"
+-------------+-----------+
| User | Host |
+-------------+-----------+
| keystone | % |
| keystone | localhost |
| mariadb.sys | localhost |
| mysql | localhost |
| root | localhost |
+-------------+-----------+
# Create the keystone database and user
root@ubuntu:~# apt install keystone -y
root@ubuntu:~# cp /etc/keystone/keystone.conf /opt/bak/keystone.conf.bak
# Back up the original configuration
root@ubuntu:~# sed -i "s#connection = sqlite:var/lib/keystone/keystone.db#connection = mysql+pymysql://keystone:000000@controller/keystone#g" /etc/keystone/keystone.conf
root@ubuntu:~# sed -n '/^\[token\]$/p' /etc/keystone/keystone.conf
[token]
root@ubuntu:~# sed -i '/^\[token\]$/a\provider = fernet' /etc/keystone/keystone.conf
# Modify the config file
root@ubuntu:~# grep -Ev '^$|#' /etc/keystone/keystone.conf
[DEFAULT]
log_dir = /var/log/keystone
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:000000@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[extra_headers]
Distribution = Ubuntu
[federation]
[fernet_receipts]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[jwt_tokens]
[ldap]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[receipt]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[token]
provider = fernet
[tokenless_auth]
[totp]
[trust]
[unified_limit]
[wsgi]
# Filter the file to show only the effective settings
root@ubuntu:~# su -s /bin/sh -c "keystone-manage db_sync" keystone
# Populate the database
root@ubuntu:~# mysql -ukeystone -p000000 -hcontroller -e "use keystone;show tables;"
...
# Check that the database was populated successfully
root@ubuntu:~# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
root@ubuntu:~# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# Initialize the Fernet key repositories
root@ubuntu:~# keystone-manage bootstrap --bootstrap-password 000000 \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
# Bootstrap the Identity service
root@ubuntu:~# grep -i servername /etc/apache2/apache2.conf
root@ubuntu:~# echo "ServerName controller" >> /etc/apache2/apache2.conf
root@ubuntu:~# systemctl enable --now apache2
Synchronizing state of apache2.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable apache2
root@ubuntu:~# systemctl restart apache2
# Configure apache2
root@ubuntu:~# cat >> /etc/keystone/admin-openrc.sh <<EOF
export OS_USERNAME=admin
export OS_PASSWORD=000000
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF
root@ubuntu:~# echo "source /etc/keystone/admin-openrc.sh" >> /etc/profile
# Set the environment variables
root@ubuntu:~# source /etc/keystone/admin-openrc.sh
# Load the environment variables
root@ubuntu:~# openstack domain create --description "An Example Domain" example
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | An Example Domain |
| enabled | True |
| id | 48689f5f9b144dd1a1fd144d9d33e94d |
| name | example |
| options | {} |
| tags | [] |
+-------------+----------------------------------+
# Create the example domain
root@ubuntu:~# openstack project create --domain default \
--description "Service Project" service
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Service Project |
| domain_id | default |
| enabled | True |
| id | 8769f99597894297a32c2c3bab870453 |
| is_domain | False |
| name | service |
| options | {} |
| parent_id | default |
| tags | [] |
+-------------+----------------------------------+
# Create the service project
root@ubuntu:~# openstack project create --domain default \
--description "Demo Project" myproject
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Demo Project |
| domain_id | default |
| enabled | True |
| id | 6700047250fc47998b7fc31524d42a07 |
| is_domain | False |
| name | myproject |
| options | {} |
| parent_id | default |
| tags | [] |
+-------------+----------------------------------+
# Create the myproject project
root@ubuntu:~# openstack user create --domain default \
--password-prompt myuser
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 5cb1bc5e71c84e74b97f3575a1c08269 |
| name | myuser |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
# Create the myuser user
root@ubuntu:~# openstack role create myrole
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | None |
| domain_id | None |
| id | d2379713f54b47fe94bb5bbce4390ca8 |
| name | myrole |
| options | {} |
+-------------+----------------------------------+
# Create the myrole role
root@ubuntu:~# openstack role add --project myproject --user myuser myrole
# Add the myrole role to the myuser user in the myproject project
root@ubuntu:~# openstack token issue
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires | 2024-05-15T03:44:10+0000 |
| id | gAAAAABmRCF6Zmox7_-PMt34COcOTlsS_nTlzEBBGsB_K9st_IhEZVlw-QuNoLfiXeHAHIx9wvJ_5aXMs9q0bg6khynBj7YkxzyeXYXzeiPRdsTNBjUdtvXDz3rcsOKK3FqcfSdUpB5MKRh1EmAIrYyM3Uqbx6nw-75AU1vae5jvlfcSI7mFfAE |
| project_id | 16368b5786184f7fba25846b7ffecb0b |
| user_id | 7e0d58d6c1d242c0a096fcd77482d0b0 |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
Glance deployment
root@ubuntu:~# mysql -u root -p000000 -e "CREATE DATABASE glance;"
root@ubuntu:~# mysql -u root -p000000 -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -u root -p000000 -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -uroot -p000000 -e "use mysql; select user,host from user;" |grep glance
glance %
glance localhost
# Create the glance database and configure access
root@ubuntu:~# source /etc/keystone/admin-openrc.sh
root@ubuntu:~# openstack user create --domain default --password-prompt glance
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 00909b4245ed456487c43229a0c23b52 |
| name | glance |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
# Create the glance user
root@ubuntu:~# openstack role add --project service --user glance admin
# Add the admin role to the glance user and the service project
root@ubuntu:~# openstack service create --name glance \
--description "OpenStack Image" image
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Image |
| enabled | True |
| id | 9f546eda878e4a2fad9e744ea21e8d2c |
| name | glance |
| type | image |
+-------------+----------------------------------+
# Create the glance service entity
root@ubuntu:~# openstack endpoint create --region RegionOne \
image public http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | b33fe92ee1d944a4b4b934cd2df2d471 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 9f546eda878e4a2fad9e744ea21e8d2c |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
image internal http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 45972b33852140efaf13c48e61fc5128 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 9f546eda878e4a2fad9e744ea21e8d2c |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
image admin http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 3e06d5485658496b83c58b7549eae6a2 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 9f546eda878e4a2fad9e744ea21e8d2c |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
# Create the Image service API endpoints
root@ubuntu:~# apt install glance -y
root@ubuntu:~# cp /etc/glance/glance-api.conf /opt/bak/glance-api.conf.bak
# Back up the file
root@ubuntu:~# sed -i "s#connection = sqlite:var/lib/glance/glance.sqlite#connection = mysql+pymysql://glance:000000@controller/glance#g" /etc/glance/glance-api.conf
root@ubuntu:~# sed -n "/^\[keystone_authtoken]/p" /etc/glance/glance-api.conf
[keystone_authtoken]
root@ubuntu:~# awk '/^\[keystone_authtoken\]$/{print; print "www_authenticate_uri = http://controller:5000"; print "auth_url = http://controller:5000"; print "memcached_servers = controller:11211"; print "auth_type = password"; print "project_domain_name = Default"; print "user_domain_name = Default"; print "project_name = service"; print "username = glance"; print "password = 000000"; next}1' /etc/glance/glance-api.conf > tmp.conf && mv tmp.conf /etc/glance/glance-api.conf
root@ubuntu:~# awk '/^\[paste_deploy\]$/{flag=1} flag && NF==0{flag=0; print "flavor = keystone"} flag{print} !flag{print} END{if(flag) print "flavor = keystone"}' /etc/glance/glance-api.conf > tmp.conf && mv tmp.conf /etc/glance/glance-api.conf
root@ubuntu:~# awk '
/^\[glance_store\]/ {
print;
print "stores = file,http";
print "default_store = file";
print "filesystem_store_datadir = /var/lib/glance/images/";
next
}
{ print }
' /etc/glance/glance-api.conf > tmp.conf && mv tmp.conf /etc/glance/glance-api.conf
root@ubuntu:~# awk '
/^\[oslo_limit\]/ {
print;
print "auth_url = http://controller:5000";
print "auth_type = password";
print "user_domain_id = default";
print "username = glance";
print "system_scope = all";
print "password = 000000";
print "endpoint_id = 340be3625e9b4239a6415d034e98aace";
print "region_name = RegionOne";
next
}
{ print }
' /etc/glance/glance-api.conf > tmp.conf && mv tmp.conf /etc/glance/glance-api.conf
root@ubuntu:~# grep -Ev "^$|#" /etc/glance/glance-api.conf
[DEFAULT]
[barbican]
[barbican_service_user]
[cinder]
[cors]
[database]
connection = mysql+pymysql://glance:000000@controller/glance
backend = sqlalchemy
[file]
[glance.store.http.store]
[glance.store.rbd.store]
[glance.store.s3.store]
[glance.store.swift.store]
[glance.store.vmware_datastore.store]
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[healthcheck]
[image_format]
disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop.root-tar
[key_manager]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 000000
[os_brick]
[oslo_concurrency]
[oslo_limit]
auth_url = http://controller:5000
auth_type = password
user_domain_id = default
username = glance
system_scope = all
password = 000000
endpoint_id = b33fe92ee1d944a4b4b934cd2df2d471
region_name = RegionOne
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
[vault]
[wsgi]
# Modify the config file
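# Note: the awk edits above are fragile; an alternative sketch is to set individual options with crudini
# (an extra package, not used elsewhere in this guide), for example:
root@ubuntu:~# apt install crudini -y
root@ubuntu:~# crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://controller:5000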
root@ubuntu:~# source /etc/keystone/admin-openrc.sh
root@ubuntu:~# openstack role add --user glance --user-domain Default --system all reader
# Make sure the glance account has reader access to system-scope resources (such as limits)
root@ubuntu:~# su -s /bin/sh -c "glance-manage db_sync" glance
# Populate the Image service database
root@ubuntu:~# systemctl enable --now glance-api
Synchronizing state of glance-api.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable glance-api
root@ubuntu:~# systemctl restart glance-api
# Restart the glance-api service
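# The cirros image file is assumed to already be in the working directory; if not, it can be downloaded first:
root@ubuntu:~# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img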
root@ubuntu:~# glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility=public
...
root@ubuntu:~# glance image-list
+--------------------------------------+--------+
| ID | Name |
+--------------------------------------+--------+
| c9fe6431-c174-4688-b60b-dcd2b4bb29cb | cirros |
+--------------------------------------+--------+
# Upload an image to test the service
Placement deployment
root@ubuntu:~# mysql -uroot -p000000 -e "CREATE DATABASE placement;"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -uroot -p000000 -e "use mysql; select user,host from user;" |grep placement
placement %
placement localhost
# Create the database and user
root@ubuntu:~# openstack user create --domain default --password-prompt placement
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | f6ad1e3e434847d09fac4bee460d87bb |
| name | placement |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
# Create a placement service user
root@ubuntu:~# openstack role add --project service --user placement admin
# Add the placement user to the service project with the admin role
root@ubuntu:~# openstack service create --name placement \
--description "Placement API" placement
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Placement API |
| enabled | True |
| id | f0826a8f8c61483192afac598ff7a206 |
| name | placement |
| type | placement |
+-------------+----------------------------------+
# Create the Placement API entry in the service catalog
root@ubuntu:~# openstack endpoint create --region RegionOne \
placement public http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | a5e72a74f1634932a8ad5397231465ca |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | f0826a8f8c61483192afac598ff7a206 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
placement internal http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 0955ac4f393c4b56b4f6833cd4cb19d5 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | f0826a8f8c61483192afac598ff7a206 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
placement admin http://controller:8778
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 5dff271fb0ba4dd9b32faa2097f9a690 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | f0826a8f8c61483192afac598ff7a206 |
| service_name | placement |
| service_type | placement |
| url | http://controller:8778 |
+--------------+----------------------------------+
# Create the Placement API service endpoints
root@ubuntu:~# apt install placement-api -y
root@ubuntu:~# sed -i "s#connection = sqlite:var/lib/placement/placement.sqlite#connection = mysql+pymysql://placement:000000@controller/placement#g" /etc/placement/placement.conf
root@ubuntu:~# sed -i "/^\[api\]/a\auth_strategy = keystone" /etc/placement/placement.conf
root@ubuntu:~# sed -i "/^\[keystone_authtoken\]$/a\auth_url = http://controller:5000/v3\nmemcached_servers = controller:11211\nauth_type = password\nproject_domain_name = Default\nuser_domain_name = Default\nproject_name = service\nusername = placement\npassword = 000000" /etc/placement/placement.conf
root@ubuntu:~# grep -Ev '^$|#' /etc/placement/placement.conf
[DEFAULT]
[api]
auth_strategy = keystone
[cors]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = 000000
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
connection = mysql+pymysql://placement:000000@controller/placement
[profiler]
# Modify the config file
root@ubuntu:~# su -s /bin/sh -c "placement-manage db sync" placement
# Populate the database
root@ubuntu:~# systemctl restart apache2
root@ubuntu:~# placement-status upgrade check
+-------------------------------------------+
| Upgrade Check Results |
+-------------------------------------------+
| Check: Missing Root Provider IDs |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: Incomplete Consumers |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success |
| Details: None |
+-------------------------------------------+
# Verify the status
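# Optionally, deeper placement checks are possible with the osc-placement CLI plugin (installed here via pip as an extra):
root@ubuntu:~# pip3 install osc-placement
root@ubuntu:~# openstack --os-placement-api-version 1.2 resource class list --sort-column name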
Nova deployment
Controller node configuration
root@ubuntu:~# mysql -uroot -p000000 -e "CREATE DATABASE nova;" && \
mysql -uroot -p000000 -e "CREATE DATABASE nova_api;" && \
mysql -uroot -p000000 -e "CREATE DATABASE nova_cell0;" && \
mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '000000';" && \
mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '000000';" && \
mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '000000';" && \
mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '000000';" && \
mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '000000';" && \
mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -u root -p000000 -e "use mysql;select user,host from user;" |grep nova
nova %
nova localhost
# Create the databases and user
root@ubuntu:~# openstack user create --domain default --password-prompt nova
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 0fd49a2107e94802b4975d4296e72af6 |
| name | nova |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
# Create the nova user
root@ubuntu:~# openstack role add --project service --user nova admin
# Add the admin role to the nova user
root@ubuntu:~# openstack service create --name nova \
--description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Compute |
| enabled | True |
| id | 40ccef034ef946d7b0dd3e60045d0ad3 |
| name | nova |
| type | compute |
+-------------+----------------------------------+
# Create the nova service entity
root@ubuntu:~# openstack endpoint create --region RegionOne \
compute public http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 483d4ed5353a445fb659b89622d0b487 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 40ccef034ef946d7b0dd3e60045d0ad3 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
compute internal http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 42be119addd747f3951e1f77d9111f42 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 40ccef034ef946d7b0dd3e60045d0ad3 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
compute admin http://controller:8774/v2.1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | fe0b6e456b8a4bcf84eb63d8da1154dc |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 40ccef034ef946d7b0dd3e60045d0ad3 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1 |
+--------------+----------------------------------+
# Create the Compute API service endpoints
root@ubuntu:~# apt install nova-api nova-conductor nova-novncproxy nova-scheduler -y
root@ubuntu:~# cp /etc/nova/nova* /opt/bak/
# Back up the files
# root@ubuntu:~# sed -i "s#connection = sqlite:////var/lib/nova/nova_api.sqlite#connection = mysql+pymysql://nova:000000@controller/nova_api#g" /etc/nova/nova.conf
# root@ubuntu:~# sed -i "s#connection = sqlite:////var/lib/nova/nova.sqlite#connection = mysql+pymysql://nova:000000@controller/nova#g" /etc/nova/nova.conf
# root@ubuntu:~# sed -i "/^\[api]/a\auth_strategy = keystone" /etc/nova/nova.conf
# # From here on I edited the file directly and show the result below, rather than modifying it non-interactively with commands
root@ubuntu:~# grep -Ev '^$|#' /etc/nova/nova.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:000000@controller/nova_api
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:000000@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 000000
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_limit]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 000000
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
send_service_user_token = true
auth_url = https://controller/identity
auth_strategy = keystone
auth_type = password
project_domain_name = Default
project_name = service
user_domain_name = Default
username = nova
password = 000000
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 192.168.0.88
server_proxyclient_address = 192.168.0.88
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack =
# Replace the file directly with the contents above
root@ubuntu:~# su -s /bin/sh -c "nova-manage api_db sync" nova
Modules with known eventlet monkey patching issues were imported prior to eventlet monkey patching: urllib3. This warning can usually be ignored if the caller is only importing and not executing nova code.
root@ubuntu:~# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
Modules with known eventlet monkey patching issues were imported prior to eventlet monkey patching: urllib3. This warning can usually be ignored if the caller is only importing and not executing nova code.
root@ubuntu:~# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
Modules with known eventlet monkey patching issues were imported prior to eventlet monkey patching: urllib3. This warning can usually be ignored if the caller is only importing and not executing nova code.
--transport-url not provided in the command line, using the value [DEFAULT]/transport_url from the configuration file
--database_connection not provided in the command line, using the value [database]/connection from the configuration file
b67965e5-ff18-4062-89ca-52d942f820a9
root@ubuntu:~# su -s /bin/sh -c "nova-manage db sync" nova
Modules with known eventlet monkey patching issues were imported prior to eventlet monkey patching: urllib3. This warning can usually be ignored if the caller is only importing and not executing nova code.
root@ubuntu:~# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
Modules with known eventlet monkey patching issues were imported prior to eventlet monkey patching: urllib3. This warning can usually be ignored if the caller is only importing and not executing nova code.
+-------+--------------------------------------+---------------+-------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+---------------+-------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 | False |
| cell1 | b67965e5-ff18-4062-89ca-52d942f820a9 | rabbit: | mysql+pymysql://nova:****@controller/nova | False |
+-------+--------------------------------------+---------------+-------------------------------------------------+----------+
# Populate the databases
root@ubuntu:~# systemctl enable --now nova-api nova-scheduler nova-conductor nova-novncproxy
Synchronizing state of nova-api.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nova-api
Synchronizing state of nova-scheduler.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nova-scheduler
Synchronizing state of nova-conductor.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nova-conductor
Synchronizing state of nova-novncproxy.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nova-novncproxy
root@ubuntu:~# systemctl restart nova-api nova-scheduler nova-conductor nova-novncproxy
# Restart the services and enable them at boot
Compute node configuration
root@ubuntu:~# grep -Ev '^$|#' /etc/nova/nova.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:000000@controller/nova_api
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:000000@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 000000
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_limit]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 000000
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
send_service_user_token = true
auth_url = https://controller/identity
auth_strategy = keystone
auth_type = password
project_domain_name = Default
project_name = service
user_domain_name = Default
username = nova
password = 000000
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = 192.168.0.88
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack =
# Replace the config file with the contents above
root@ubuntu:~# egrep -c '(vmx|svm)' /proc/cpuinfo
0
root@ubuntu:~# sed -i "s/virt_type=kvm/virt_type=qemu/g" /etc/nova/nova-compute.conf
# If the first command returns a value other than 0, the second command is not needed
# The second command switches libvirt from kvm to qemu
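# An optional cross-check of hardware virtualization support is kvm-ok from the cpu-checker package:
root@ubuntu:~# apt install cpu-checker -y
root@ubuntu:~# kvm-ok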
root@ubuntu:~# systemctl enable --now nova-compute
Synchronizing state of nova-compute.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nova-compute
root@ubuntu:~# systemctl restart nova-compute
# Restart the service and enable it at boot
root@ubuntu:~# openstack compute service list --service nova-compute
+--------------------------------------+--------------+--------+------+---------+-------+------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+--------------------------------------+--------------+--------+------+---------+-------+------------+
| f72e027d-90dc-48f6-9033-c36c700e1006 | nova-compute | ubuntu | nova | enabled | up | None |
+--------------------------------------+--------------+--------+------+---------+-------+------------+
root@ubuntu:~# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Modules with known eventlet monkey patching issues were imported prior to eventlet monkey patching: urllib3. This warning can usually be ignored if the caller is only importing and not executing nova code.
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': b67965e5-ff18-4062-89ca-52d942f820a9
Checking host mapping for compute host 'ubuntu': 0b586a9e-9041-45d1-9550-7052a2510776
Creating host mapping for compute host 'ubuntu': 0b586a9e-9041-45d1-9550-7052a2510776
Found 1 unmapped computes in cell: b67965e5-ff18-4062-89ca-52d942f820a9
# Discover the compute hosts
root@ubuntu:~# sed -n "/^\[scheduler]/p" /etc/nova/nova.conf
[scheduler]
root@ubuntu:~# sed -i "/^\[scheduler]/a\discover_hosts_in_cells_interval = 300" /etc/nova/nova.conf
# Add the host discovery interval
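# The new interval only takes effect after nova-scheduler re-reads its configuration, so restart it:
root@ubuntu:~# systemctl restart nova-scheduler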
root@ubuntu:~# openstack compute service list
+--------------------------------------+----------------+--------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+--------------------------------------+----------------+--------+----------+---------+-------+----------------------------+
| 3e8fe948-8c53-4b9a-9c66-1aed45911cbb | nova-conductor | ubuntu | internal | enabled | up | 2024-05-15T04:51:13.000000 |
| 3015d1c5-260d-4c7b-b84f-b276168213ce | nova-scheduler | ubuntu | internal | enabled | up | 2024-05-15T04:51:13.000000 |
| f72e027d-90dc-48f6-9033-c36c700e1006 | nova-compute | ubuntu | nova | enabled | up | 2024-05-15T04:51:06.000000 |
+--------------------------------------+----------------+--------+----------+---------+-------+----------------------------+
root@ubuntu:~# openstack image list
+--------------------------------------+--------+--------+
| ID | Name | Status |
+--------------------------------------+--------+--------+
| c9fe6431-c174-4688-b60b-dcd2b4bb29cb | cirros | active |
+--------------------------------------+--------+--------+
root@ubuntu:~# nova-status upgrade check
Modules with known eventlet monkey patching issues were imported prior to eventlet monkey patching: urllib3. This warning can usually be ignored if the caller is only importing and not executing nova code.
+-------------------------------------------+
| Upgrade Check Results |
+-------------------------------------------+
| Check: Cells v2 |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: Placement API |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: Cinder API |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: Older than N-1 computes |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: hw_machine_type unset |
| Result: Success |
| Details: None |
+-------------------------------------------+
| Check: Service User Token Configuration |
| Result: Success |
| Details: None |
+-------------------------------------------+
# Check the cluster status
Neutron deployment
Controller node deployment
root@ubuntu:~# mysql -uroot -p000000 -e "CREATE DATABASE neutron;"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
IDENTIFIED BY '000000';"
# Create the database and user
root@ubuntu:~# openstack user create --domain default --password-prompt neutron
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 8b432eeb7e6745d6b790bc4bd93fea0e |
| name | neutron |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
# Create the neutron user
root@ubuntu:~# openstack role add --project service --user neutron admin
# Add the admin role to the neutron user
root@ubuntu:~# openstack service create --name neutron \
--description "OpenStack Networking" network
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Networking |
| enabled | True |
| id | 0d4d62f556f54f018ffdabd374e3981d |
| name | neutron |
| type | network |
+-------------+----------------------------------+
# Create the neutron service entity
root@ubuntu:~# openstack endpoint create --region RegionOne \
network public http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 80c6a112e7fe4fcba669725fa8a3f4f3 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 0d4d62f556f54f018ffdabd374e3981d |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
network internal http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 2bf91b277cea437f8f4a38aa47018ad7 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 0d4d62f556f54f018ffdabd374e3981d |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
network admin http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | ca929706ed4a4085ac79e46e4a81b8b3 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 0d4d62f556f54f018ffdabd374e3981d |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
# Create the Networking service API endpoints
root@ubuntu:~# apt install neutron-server neutron-plugin-ml2 neutron-openvswitch-agent neutron-l3-agent neutron-dhcp-agent neutron-metadata-agent -y
root@ubuntu:~# cp /etc/neutron/neutron.conf /opt/bak/neutron.conf.bak
root@ubuntu:~# cp /etc/neutron/plugins/ml2/ml2_conf.ini /opt/bak/ml2_conf.ini.bak
root@ubuntu:~# cp /etc/neutron/plugins/ml2/openvswitch_agent.ini /opt/bak/openvswitch_agent.ini.bak
root@ubuntu:~# cp /etc/neutron/l3_agent.ini /opt/bak/l3_agent.ini.bak
root@ubuntu:~# cp /etc/neutron/dhcp_agent.ini /opt/bak/dhcp_agent.ini.bak
# Back up the config files
root@ubuntu:~# modprobe br_netfilter
root@ubuntu:~# lsmod | grep br_netfilter
br_netfilter 32768 0
bridge 307200 1 br_netfilter
# Load the kernel module
root@ubuntu:~# sysctl net.bridge.bridge-nf-call-iptables
net.bridge.bridge-nf-call-iptables = 1
root@ubuntu:~# sysctl net.bridge.bridge-nf-call-ip6tables
net.bridge.bridge-nf-call-ip6tables = 1
# Check the values; if they are not 1, they must be enabled
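# If either value is 0, it can be enabled and persisted across reboots roughly like this (the file names are arbitrary):
root@ubuntu:~# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
root@ubuntu:~# cat > /etc/sysctl.d/99-neutron-bridge.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
root@ubuntu:~# sysctl --system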
root@ubuntu:~# grep -Ev '^$|#' /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
transport_url = rabbit://openstack:000000@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[agent]
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
[cache]
[cors]
[database]
connection = mysql+pymysql://neutron:000000@controller/neutron
[experimental]
[healthcheck]
[ironic]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = neutron
password = 000000
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = nova
password = 000000
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[placement]
[privsep]
[profiler]
[quotas]
[ssl]
# Modify neutron.conf
root@ubuntu:~# grep -Ev '^$|#' /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[ovs_driver]
[securitygroup]
[sriov_driver]
# Modify ml2_conf.ini
root@ubuntu:~# grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:br-provider
[network_log]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = true
local_ip = 192.168.0.88
l2_population = true
root@ubuntu:~# grep -Ev '^$|#' /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
[dhcp]
[network_log]
[ovs]
bridge_mappings = provider:br-provider
[vxlan]
local_ip = 192.168.0.88
l2_population = true
[securitygroup]
enable_ipset = true
enable_security_group = true
firewall_driver = openvswitch
# Modify openvswitch_agent.ini
root@ubuntu:~# grep -Ev '^$|#' /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = openvswitch
[agent]
[network_log]
[ovs]
# Modify l3_agent.ini
root@ubuntu:~# grep -Ev '^$|#' /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = openvswitch
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
[agent]
[ovs]
# Modify dhcp_agent.ini
root@ubuntu:~# cp /etc/neutron/metadata_agent.ini /opt/bak/
# Back up the file
root@ubuntu:~# grep -Ev '^$|#' /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = 000000
[agent]
[cache]
# Replace the file contents with the above
root@ubuntu:~# grep -Ev '^$|#' /etc/nova/nova.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:000000@controller/nova_api
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:000000@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 000000
[libvirt]
[metrics]
[mks]
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = neutron
password = 000000
service_metadata_proxy = true
metadata_proxy_shared_secret = 000000
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_limit]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 000000
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
send_service_user_token = true
auth_url = https://controller/identity
auth_strategy = keystone
auth_type = password
project_domain_name = Default
project_name = service
user_domain_name = Default
username = nova
password = 000000
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = 192.168.0.88
novncproxy_base_url = http://192.168.0.88:6080/vnc_auto.html
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack =
# Replace the contents with the above
root@ubuntu:~# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# Populate the database
root@ubuntu:~# systemctl restart nova-api
# Restart nova-api
root@ubuntu:~# systemctl enable --now neutron-server neutron-openvswitch-agent neutron-dhcp-agent neutron-metadata-agent neutron-l3-agent
Synchronizing state of neutron-server.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable neutron-server
Synchronizing state of neutron-openvswitch-agent.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable neutron-openvswitch-agent
Synchronizing state of neutron-dhcp-agent.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable neutron-dhcp-agent
Synchronizing state of neutron-metadata-agent.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable neutron-metadata-agent
Synchronizing state of neutron-l3-agent.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable neutron-l3-agent
root@ubuntu:~# systemctl restart neutron-server neutron-openvswitch-agent neutron-dhcp-agent neutron-metadata-agent neutron-l3-agent
# Restart the services and enable them at boot
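# Once the agents are running, they should all report as alive (exact columns vary by client version):
root@ubuntu:~# openstack network agent list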
Compute node deployment
root@ubuntu:~# apt install neutron-openvswitch-agent -y
root@ubuntu:~# apt install neutron-linuxbridge-agent -y
root@ubuntu:~# ovs-vsctl add-br br-provider
root@ubuntu:~# ovs-vsctl add-port br-provider ens34
root@ubuntu:~# grep -Ev '^$|#' /etc/nova/nova.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
log_dir = /var/log/nova
lock_path = /var/lock/nova
state_path = /var/lib/nova
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:000000@controller/nova_api
[barbican]
[barbican_service_user]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[cyborg]
[database]
connection = mysql+pymysql://nova:000000@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_cache]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 000000
[libvirt]
[metrics]
[mks]
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = neutron
password = 000000
service_metadata_proxy = true
metadata_proxy_shared_secret = 000000
[notifications]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_limit]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 000000
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
#[service_user]
#send_service_user_token = true
#auth_url = https://controller/identity
#auth_strategy = keystone
#auth_type = password
#project_domain_name = Default
#project_name = service
#user_domain_name = Default
#username = nova
#password = 000000
# Keep this block commented out here, otherwise it can cause errors: the auth_url above points to https://controller/identity, which does not exist in this deployment
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = 192.168.0.88
novncproxy_base_url = http://192.168.0.88:6080/vnc_auto.html
[workarounds]
[wsgi]
[zvm]
[cells]
enable = False
[os_region_name]
openstack =
# Replace the file contents with the above
root@ubuntu:~# grep -Ev "^$|#" /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
tunnel_types = vxlan
l2_population = true
[dhcp]
[network_log]
[ovs]
bridge_mappings = provider:br-provider
local_ip = 192.168.0.88
[vxlan]
local_ip = 192.168.0.88
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = openvswitch
root@ubuntu:~# systemctl enable --now nova-compute
Synchronizing state of nova-compute.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nova-compute
root@ubuntu:~# systemctl restart nova-compute
root@ubuntu:~# systemctl enable --now neutron-openvswitch-agent
Synchronizing state of neutron-openvswitch-agent.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable neutron-openvswitch-agent
root@ubuntu:~# systemctl restart neutron-openvswitch-agent
# Restart the services and enable them at boot
Cinder deployment
Controller node
root@ubuntu:~# mysql -uroot -p000000 -e "CREATE DATABASE cinder;"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
IDENTIFIED BY '000000';"
# Create the database and user
root@ubuntu:~# openstack user create --domain default --password-prompt cinder
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 39cf38c9eb6547ac871cf83df0b0f30f |
| name | cinder |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
# Create the cinder user
root@ubuntu:~# openstack role add --project service --user cinder admin
# Add the admin role to the cinder user
root@ubuntu:~# openstack service create --name cinderv3 \
--description "OpenStack Block Storage" volumev3
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Block Storage |
| enabled | True |
| id | d1af95994a4542208268e279eba9ced7 |
| name | cinderv3 |
| type | volumev3 |
+-------------+----------------------------------+
# Create the cinderv3 service entity
root@ubuntu:~# openstack endpoint create --region RegionOne \
volumev3 public http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 1b639c79d566403a817afa64b2decee7 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | d1af95994a4542208268e279eba9ced7 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
volumev3 internal http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 3d7d01dd7ecf49938f98e3cb0a714080 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | d1af95994a4542208268e279eba9ced7 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
volumev3 admin http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | dce3c0fe11b1441392ae69d16665983d |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | d1af95994a4542208268e279eba9ced7 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
# Create the Block Storage service API endpoints
root@ubuntu:~# apt install cinder-api cinder-scheduler -y
root@ubuntu:~# cp /etc/cinder/cinder.conf /opt/bak/
# Back up the file
root@ubuntu:~# cat /etc/cinder/cinder.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = lioadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
enabled_backends = lvm
[database]
connection = mysql+pymysql://cinder:000000@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 000000
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
# Replace the file contents with the above
root@ubuntu:~# su -s /bin/sh -c "cinder-manage db sync" cinder
# Populate the Block Storage database
root@ubuntu:~# sed -i "/^\[cinder\]/a\os_region_name = RegionOne" /etc/nova/nova.conf
# Configure Compute to use Block Storage
root@ubuntu:~# systemctl enable --now nova-api
Synchronizing state of nova-api.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nova-api
root@ubuntu:~# systemctl restart nova-api
# Restart the Compute API service
root@ubuntu:~# systemctl enable --now cinder-scheduler
Synchronizing state of cinder-scheduler.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable cinder-scheduler
root@ubuntu:~# systemctl restart cinder-scheduler
root@ubuntu:~# systemctl enable --now apache2
Synchronizing state of apache2.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable apache2
root@ubuntu:~# systemctl restart apache2
# Restart the Block Storage services
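Optionally, a quick sanity check (assuming the admin credentials are loaded in the shell): the scheduler should now be reported as up.
root@ubuntu:~# openstack volume service list
# cinder-scheduler on controller should show State "up"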
Storage node
root@ubuntu:~# apt -y install cinder-backup
root@ubuntu:~# sudo usermod -aG disk cinder
root@ubuntu:~# apt install lvm2 thin-provisioning-tools -y
root@ubuntu:~# pvcreate /dev/sdb1
Physical volume "/dev/sdb1" successfully created.
root@ubuntu:~# vgcreate cinder-volumes /dev/sdb1
Volume group "cinder-volumes" successfully created
# Create the LVM physical volume and the cinder-volumes volume group
root@ubuntu:~# grep "devices {" /etc/lvm/lvm.conf -A100 |grep -Ev "^$|#"
devices {
dir = "/dev"
scan = [ "/dev" ]
obtain_device_list_from_udev = 1
external_device_info_source = "none"
filter = [ "a/sdb1/", "r/.*/"]
# Add filter = [ "a/sdb1/", "r/.*/"] to the devices section of the file
# If the storage node uses LVM on the operating system disk, the associated device must also be added to the filter. For example, if the /dev/sda device contains the operating system:
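A minimal sketch of such a filter for this environment, where the blkid output later in this guide shows the OS LVM on /dev/sda; accept both disks and reject everything else (adjust device names to your own layout):
# example only:
filter = [ "a/sda/", "a/sdb1/", "r/.*/"]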
root@ubuntu:~# apt install cinder-volume tgt -y
root@ubuntu:~# cat /etc/cinder/cinder.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = lioadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
enabled_backends = lvm
glance_api_servers = http://controller:9292
[database]
connection = mysql+pymysql://cinder:000000@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 000000
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
# Replace the file contents with the above
root@ubuntu:~# systemctl enable --now tgt cinder-volume
Synchronizing state of tgt.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable tgt
Synchronizing state of cinder-volume.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable cinder-volume
root@ubuntu:~# systemctl restart tgt cinder-volume
# Restart the services and enable them at boot
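As an optional end-to-end check of the LVM backend (the volume name test-lvm is just an example), creating a small volume should leave it in the available state:
root@ubuntu:~# openstack volume service list
root@ubuntu:~# openstack volume create --size 1 test-lvm
root@ubuntu:~# openstack volume list
# clean up afterwards with: openstack volume delete test-lvm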
root@ubuntu:~# apt install cinder-backup
root@ubuntu:~# openstack catalog show object-store
root@ubuntu:~# grep -Ev "^$|#" /etc/cinder/cinder.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = lioadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
enabled_backends = lvm
glance_api_servers = http://controller:9292
backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
backup_swift_url = http://controller:8080
[database]
connection = mysql+pymysql://cinder:000000@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 000000
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
root@ubuntu:~# service cinder-backup restart
swift deployment
Controller node
root@ubuntu:~# openstack user create --domain default --password-prompt swift
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | cfc9190742a641229e2f34c221c8f3f9 |
| name | swift |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
root@ubuntu:~# openstack role add --project service --user swift admin
root@ubuntu:~# openstack service create --name swift \
--description "OpenStack Object Storage" object-store
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Object Storage |
| enabled | True |
| id | efafab3a29af43f2840217e5ca146699 |
| name | swift |
| type | object-store |
+-------------+----------------------------------+
# Create the swift service entity
root@ubuntu:~# openstack endpoint create --region RegionOne \
object-store public http://controller:8080/v1/AUTH_%\(project_id\)s
+--------------+-----------------------------------------------+
| Field | Value |
+--------------+-----------------------------------------------+
| enabled | True |
| id | 8a62bcd81a5b4df491446e3f762af865 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | efafab3a29af43f2840217e5ca146699 |
| service_name | swift |
| service_type | object-store |
| url | http://controller:8080/v1/AUTH_%(project_id)s |
+--------------+-----------------------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
object-store internal http://controller:8080/v1/AUTH_%\(project_id\)s
+--------------+-----------------------------------------------+
| Field | Value |
+--------------+-----------------------------------------------+
| enabled | True |
| id | fe4f76765607407ea71f918416a758d0 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | efafab3a29af43f2840217e5ca146699 |
| service_name | swift |
| service_type | object-store |
| url | http://controller:8080/v1/AUTH_%(project_id)s |
+--------------+-----------------------------------------------+
root@ubuntu:~# openstack endpoint create --region RegionOne \
object-store admin http://controller:8080/v1
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | f80a346ff70840348b7f9d8bcd693f55 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | efafab3a29af43f2840217e5ca146699 |
| service_name | swift |
| service_type | object-store |
| url | http://controller:8080/v1 |
+--------------+----------------------------------+
# Create the Object Storage service API endpoints
root@ubuntu:~# apt-get install swift swift-proxy python3-swiftclient python3-keystoneclient python3-keystonemiddleware memcached -y
root@ubuntu:~# curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/proxy-server.conf-sample
# Install the packages and fetch the sample proxy-server configuration
root@ubuntu:~# cp /etc/swift/proxy-server.conf /opt/bak/
# Back up the file
root@ubuntu:~# grep -Ev "^$|#" /etc/swift/proxy-server.conf
[DEFAULT]
bind_port = 8080
user = swift
swift_dir = /etc/swift
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[app:proxy-server]
account_autocreate = True
[filter:tempauth]
user_admin_admin = admin .admin .reseller_admin
user_admin_auditor = admin_ro .reseller_reader
user_test_tester = testing .admin
user_test_tester2 = testing2 .admin
user_test_tester3 = testing3
user_test2_tester2 = testing2 .admin
user_test5_tester5 = testing5 service
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = swift
password = 000000
delay_auth_decision = True
[filter:keystoneauth]
operator_roles = admin,user
[filter:s3api]
[filter:s3token]
reseller_prefix = AUTH_
delay_auth_decision = False
auth_uri = http://keystonehost:5000/v3
http_timeout = 10.0
[filter:healthcheck]
[filter:cache]
memcache_servers = controller:11211
[filter:ratelimit]
[filter:read_only]
[filter:domain_remap]
[filter:catch_errors]
[filter:cname_lookup]
[filter:staticweb]
[filter:tempurl]
[filter:formpost]
[filter:name_check]
[filter:etag-quoter]
[filter:list-endpoints]
[filter:proxy-logging]
[filter:bulk]
[filter:slo]
[filter:dlo]
[filter:container-quotas]
[filter:account-quotas]
[filter:gatekeeper]
[filter:container_sync]
[filter:xprofile]
[filter:versioned_writes]
[filter:copy]
[filter:keymaster]
meta_version_to_write = 2
encryption_root_secret = changeme
[filter:kms_keymaster]
[filter:kmip_keymaster]
[filter:encryption]
[filter:listing_formats]
[filter:symlink]
# Replace the file contents with the above
Storage node
root@ubuntu:~# apt-get install xfsprogs rsync -y
root@ubuntu:~# mkfs.xfs /dev/sdb2
meta-data=/dev/sdb2 isize=512 agcount=4, agsize=327680 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=1310720, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
root@ubuntu:~# mkfs.xfs /dev/sdb3
meta-data=/dev/sdb3 isize=512 agcount=4, agsize=327616 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=1310464, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
root@ubuntu:~# mkfs.xfs /dev/sdc
meta-data=/dev/sdc isize=512 agcount=4, agsize=327680 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=1310720, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
# Format the /dev/sdb2, /dev/sdb3, and /dev/sdc devices as XFS
root@ubuntu:~# mkdir -p /srv/node/sdb2
root@ubuntu:~# mkdir -p /srv/node/sdb3
root@ubuntu:~# mkdir -p /srv/node/sdc
# Create the mount point directory structure
root@ubuntu:~# blkid
/dev/sr0: BLOCK_SIZE="2048" UUID="2024-02-16-23-52-30-00" LABEL="Ubuntu-Server 22.04.4 LTS amd64" TYPE="iso9660" PTTYPE="PMBR"
/dev/mapper/ubuntu--vg-ubuntu--lv: UUID="90eb789a-ab89-4ba0-9f56-ab4cbf3d5499" BLOCK_SIZE="4096" TYPE="ext4"
/dev/sda2: UUID="c5e073e4-da11-497d-8aea-e934a7f0590b" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="0a7b865a-c3fd-4bf9-814c-490db57dc2b4"
/dev/sda3: UUID="5Nj501-hGHQ-kQK5-TFxb-8H1C-hxDc-KO67G5" TYPE="LVM2_member" PARTUUID="38195098-22f7-4566-93f5-3f847c1d9023"
/dev/loop1: TYPE="squashfs"
/dev/sdb2: UUID="4a191342-2cac-459a-be26-8ff0d6a0f7b4" BLOCK_SIZE="512" TYPE="xfs" PARTUUID="442d3370-02"
/dev/sdb3: UUID="0b686029-eb4c-42d4-ab95-b530d525d7db" BLOCK_SIZE="512" TYPE="xfs" PARTUUID="442d3370-03"
/dev/sdb1: UUID="gvKLii-fW89-XRtm-CK3r-eW8b-JMNC-rkBBEJ" TYPE="LVM2_member" PARTUUID="442d3370-01"
/dev/loop4: TYPE="squashfs"
/dev/loop2: TYPE="squashfs"
/dev/loop0: TYPE="squashfs"
/dev/sda1: PARTUUID="aae6c40c-4b98-41b5-9f6e-3e0b71e3436f"
/dev/loop5: TYPE="squashfs"
/dev/loop3: TYPE="squashfs"
# Look up the UUIDs of the new partitions
root@ubuntu:~# echo 'UUID="4a191342-2cac-459a-be26-8ff0d6a0f7b4" /srv/node/sdb2 xfs noatime 0 2' >> /etc/fstab
root@ubuntu:~# echo 'UUID="0b686029-eb4c-42d4-ab95-b530d525d7db" /srv/node/sdb3 xfs noatime 0 2' >> /etc/fstab
root@ubuntu:~# echo 'UUID="f2784469-eb3c-4208-a947-f73f830fb19b" /srv/node/sdc xfs noatime 0 2' >> /etc/fstab
# Append the entries above to /etc/fstab using the UUIDs found with blkid
root@ubuntu:~# mount /dev/sdb2
root@ubuntu:~# mount /dev/sdb3
root@ubuntu:~# mount /dev/sdc
# Mount the devices
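To confirm the three XFS filesystems are mounted where Swift expects them (optional):
root@ubuntu:~# df -h /srv/node/sdb2 /srv/node/sdb3 /srv/node/sdc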
root@ubuntu:~# cat /etc/rsyncd.conf
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = 192.168.0.88
[account]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/account.lock
[container]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/container.lock
[object]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/object.lock
# Replace the file with the above
root@ubuntu:~# apt-get install swift swift-account swift-container swift-object -y
# Install the packages
root@ubuntu:~# sed -i "s/RSYNC_ENABLE=false/RSYNC_ENABLE=true/g" /etc/default/rsync
root@ubuntu:~# systemctl enable --now rsync
Synchronizing state of rsync.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable rsync
Created symlink /etc/systemd/system/multi-user.target.wants/rsync.service → /lib/systemd/system/rsync.service.
root@ubuntu:~# systemctl restart rsync
# Start rsync and enable it at boot
root@ubuntu:~# curl -o /etc/swift/account-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/account-server.conf-sample
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 13093 100 13093 0 0 17517 0 --:--:-- --:--:-- --:--:-- 17504
root@ubuntu:~# curl -o /etc/swift/container-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/container-server.conf-sample
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 22813 100 22813 0 0 24492 0 --:--:-- --:--:-- --:--:-- 24477
root@ubuntu:~# curl -o /etc/swift/object-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/object-server.conf-sample
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 32780 100 32780 0 0 37586 0 --:--:-- --:--:-- --:--:-- 37591
# Fetch the account, container, and object service configuration samples from the Object Storage source repository
root@ubuntu:~# grep -Ev "^$|#" /etc/swift/account-server.conf
[DEFAULT]
bind_ip = 192.168.0.88
bind_port = 6202
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = True
[pipeline:main]
pipeline = healthcheck recon account-server
[app:account-server]
[filter:healthcheck]
[filter:recon]
recon_cache_path = /var/cache/swift
[filter:backend_ratelimit]
[account-replicator]
[account-auditor]
[account-reaper]
[filter:xprofile]
root@ubuntu:~# grep -Ev "^$|#" /etc/swift/container-server.conf
[DEFAULT]
bind_ip = 192.168.0.88
bind_port = 6201
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = True
[pipeline:main]
pipeline = healthcheck recon container-server
[app:container-server]
[filter:healthcheck]
[filter:recon]
recon_cache_path = /var/cache/swift
[filter:backend_ratelimit]
[container-replicator]
[container-updater]
[container-auditor]
[container-sync]
[filter:xprofile]
[container-sharder]
root@ubuntu:~# grep -Ev "^$|#" /etc/swift/object-server.conf
[DEFAULT]
bind_ip = 192.168.0.88
bind_port = 6200
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = True
[pipeline:main]
pipeline = healthcheck recon object-server
[app:object-server]
[filter:healthcheck]
[filter:recon]
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock
[filter:backend_ratelimit]
[object-replicator]
[object-reconstructor]
[object-updater]
[object-auditor]
[object-expirer]
[filter:xprofile]
[object-relinker]
# Modify the file contents as above
root@ubuntu:~# chown -R swift:swift /srv/node
# Ensure correct ownership of the mount point directory structure
root@ubuntu:~# mkdir -p /var/cache/swift
root@ubuntu:~# chown -R root:swift /var/cache/swift
root@ubuntu:~# chmod -R 775 /var/cache/swift
# Create the recon directory and ensure correct ownership
Controller node - create and distribute the initial rings
Create the account ring
root@ubuntu:~# cd /etc/swift/
# Be sure to change into this directory first
root@ubuntu:/etc/swift# swift-ring-builder account.builder create 10 3 1
root@ubuntu:/etc/swift# swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6202 --device sdb2 --weight 100
Device d0r1z1-192.168.0.88:6202R192.168.0.88:6202/sdb2_"" with 100.0 weight got id 0
root@ubuntu:/etc/swift# swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6202 --device sdb3 --weight 100
Device d1r1z1-192.168.0.88:6202R192.168.0.88:6202/sdb3_"" with 100.0 weight got id 1
root@ubuntu:/etc/swift# swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6202 --device sdc --weight 100
Device d2r1z1-192.168.0.88:6202R192.168.0.88:6202/sdc_"" with 100.0 weight got id 2
root@ubuntu:/etc/swift# swift-ring-builder account.builder
account.builder, build version 3, id 59ed571a6dda45a7810282b36c4477bf
1024 partitions, 3.000000 replicas, 1 regions, 1 zones, 3 devices, 100.00 balance, 0.00 dispersion
The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
The overload factor is 0.00% (0.000000)
Ring file account.ring.gz not found, probably it hasn't been written yet
Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
0 1 1 192.168.0.88:6202 192.168.0.88:6202 sdb2 100.00 0 -100.00
1 1 1 192.168.0.88:6202 192.168.0.88:6202 sdb3 100.00 0 -100.00
2 1 1 192.168.0.88:6202 192.168.0.88:6202 sdc 100.00 0 -100.00
# Verify the ring contents
root@ubuntu:/etc/swift# swift-ring-builder account.builder rebalance
Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
# Rebalance the ring
Create the container ring
root@ubuntu:/etc/swift# swift-ring-builder container.builder create 10 3 1
root@ubuntu:/etc/swift# swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6201 --device sdb2 --weight 100
Device d0r1z1-192.168.0.88:6201R192.168.0.88:6201/sdb2_"" with 100.0 weight got id 0
root@ubuntu:/etc/swift# swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6201 --device sdb3 --weight 100
Device d1r1z1-192.168.0.88:6201R192.168.0.88:6201/sdb3_"" with 100.0 weight got id 1
root@ubuntu:/etc/swift# swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6201 --device sdc --weight 100
Device d2r1z1-192.168.0.88:6201R192.168.0.88:6201/sdc_"" with 100.0 weight got id 2
# Add each storage device to the container ring
root@ubuntu:/etc/swift# swift-ring-builder container.builder
container.builder, build version 3, id 259b738f39854fcfb332e30b4d5becf8
1024 partitions, 3.000000 replicas, 1 regions, 1 zones, 3 devices, 100.00 balance, 0.00 dispersion
The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
The overload factor is 0.00% (0.000000)
Ring file container.ring.gz not found, probably it hasn't been written yet
Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
0 1 1 192.168.0.88:6201 192.168.0.88:6201 sdb2 100.00 0 -100.00
1 1 1 192.168.0.88:6201 192.168.0.88:6201 sdb3 100.00 0 -100.00
2 1 1 192.168.0.88:6201 192.168.0.88:6201 sdc 100.00 0 -100.00
# Verify the ring contents
root@ubuntu:/etc/swift# swift-ring-builder container.builder rebalance
Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
# Rebalance the ring
Create the object ring
root@ubuntu:/etc/swift# swift-ring-builder object.builder create 10 3 1
root@ubuntu:/etc/swift# swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6200 --device sdb2 --weight 100
Device d0r1z1-192.168.0.88:6200R192.168.0.88:6200/sdb2_"" with 100.0 weight got id 0
root@ubuntu:/etc/swift# swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6200 --device sdb3 --weight 100
Device d1r1z1-192.168.0.88:6200R192.168.0.88:6200/sdb3_"" with 100.0 weight got id 1
root@ubuntu:/etc/swift# swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.0.88 --port 6200 --device sdc --weight 100
Device d2r1z1-192.168.0.88:6200R192.168.0.88:6200/sdc_"" with 100.0 weight got id 2
# Add each storage device to the object ring
root@ubuntu:/etc/swift# swift-ring-builder object.builder
object.builder, build version 3, id c6bf19d9519c4d459a1293cc62324324
1024 partitions, 3.000000 replicas, 1 regions, 1 zones, 3 devices, 100.00 balance, 0.00 dispersion
The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
The overload factor is 0.00% (0.000000)
Ring file object.ring.gz not found, probably it hasn't been written yet
Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
0 1 1 192.168.0.88:6200 192.168.0.88:6200 sdb2 100.00 0 -100.00
1 1 1 192.168.0.88:6200 192.168.0.88:6200 sdb3 100.00 0 -100.00
2 1 1 192.168.0.88:6200 192.168.0.88:6200 sdc 100.00 0 -100.00
# Verify the ring contents
root@ubuntu:/etc/swift# swift-ring-builder object.builder rebalance
Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
# Rebalance the ring
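After the three rebalances, the compressed ring files should exist next to the builder files; these .ring.gz files are what would be copied to any additional nodes:
root@ubuntu:/etc/swift# ls -l /etc/swift/*.ring.gz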
If there are additional nodes: copy the account.ring.gz, container.ring.gz, and object.ring.gz files to the /etc/swift directory on every storage node and on any other node running the proxy service.
root@ubuntu:~# curl -o /etc/swift/swift.conf https://opendev.org/openstack/swift/raw/branch/master/etc/swift.conf-sample
# Fetch /etc/swift/swift.conf from the Object Storage source repository
root@ubuntu:~# cp /etc/swift/swift.conf /opt/bak/
# Back up the file
root@ubuntu:/etc/swift# grep -Ev "^$|#" /etc/swift/swift.conf
[swift-hash]
swift_hash_path_suffix = changeme
swift_hash_path_prefix = changeme
[storage-policy:0]
name = Policy-0
default = yes
aliases = yellow, orange
[swift-constraints]
# Replace the file contents with the above (in production, set swift_hash_path_suffix and swift_hash_path_prefix to unique secret values)
root@ubuntu:~# chown -R root:swift /etc/swift
# On all nodes, ensure correct ownership of the configuration directory
root@ubuntu:/etc/swift# systemctl enable --now memcached swift-proxy
Synchronizing state of memcached.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable memcached
Synchronizing state of swift-proxy.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable swift-proxy
root@ubuntu:/etc/swift# systemctl restart memcached swift-proxy
root@ubuntu:/etc/swift# swift-init all start
# Installation complete
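Optionally, with the admin credentials sourced, the object store can be exercised end to end (the container name test-container is just an example):
root@ubuntu:~# swift stat
root@ubuntu:~# openstack container create test-container
root@ubuntu:~# openstack container list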
4-skyline deployment
skyline-apiserver source deployment
root@ubuntu:~# mysql -uroot -p000000 -e "CREATE DATABASE skyline DEFAULT CHARACTER SET \
utf8 DEFAULT COLLATE utf8_general_ci;"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON skyline.* TO 'skyline'@'localhost' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -uroot -p000000 -e "GRANT ALL PRIVILEGES ON skyline.* TO 'skyline'@'%' \
IDENTIFIED BY '000000';"
root@ubuntu:~# mysql -uroot -p000000 -e "use mysql; select user,host from user;" |grep skyline
skyline %
skyline localhost
# Create the database and grant privileges to the skyline user
root@ubuntu:~# openstack user create --domain default --password-prompt skyline
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 40485b08515a42828387f38dff787f45 |
| name | skyline |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
# Create the skyline user
root@ubuntu:~# openstack role add --project service --user skyline admin
# Grant the admin role to the skyline user
root@ubuntu:~# sudo apt update
root@ubuntu:~# sudo apt install -y git
root@ubuntu:~# cd ${HOME}
root@ubuntu:~# git clone https://opendev.org/openstack/skyline-apiserver.git
# Clone the repository from OpenDev (GitHub mirror) with git
root@ubuntu:~# sudo apt install -y python3-pip
root@ubuntu:~# sudo pip3 install skyline-apiserver/
# Install skyline-apiserver from source
root@ubuntu:~# sudo mkdir -p /etc/skyline /var/log/skyline
# Make sure the directories skyline-apiserver needs have been created
root@ubuntu:~# sudo mkdir -p /etc/skyline/policy
# Modify service policy rules (optional)
# Rename each service policy YAML file to <service_name>_policy.yaml and place it in the /etc/skyline/policy folder
root@ubuntu:~# sudo cp ${HOME}/skyline-apiserver/etc/gunicorn.py /etc/skyline/gunicorn.py
root@ubuntu:~# sudo sed -i "s/^bind = *.*/bind = ['0.0.0.0:28000']/g" /etc/skyline/gunicorn.py
root@ubuntu:~# sudo cp ${HOME}/skyline-apiserver/etc/skyline.yaml.sample /etc/skyline/skyline.yaml
# Copy the configuration files into the /etc/skyline configuration folder
# Change the bind value in /etc/skyline/gunicorn.py to 0.0.0.0:28000; the default is unix:/var/lib/skyline/skyline.sock
root@ubuntu:~# cp /etc/skyline/skyline.yaml /opt/bak/skyline.yaml.bak
# Back up the file
root@ubuntu:~# cat /etc/skyline/skyline.yaml
default:
access_token_expire: 3600
access_token_renew: 1800
cafile: ''
cors_allow_origins: []
database_url: mysql://skyline:000000@controller:3306/skyline
debug: true
log_dir: /var/log/skyline
log_file: skyline.log
policy_file_path: /etc/skyline/policy
policy_file_suffix: policy.yaml
prometheus_basic_auth_password: ''
prometheus_basic_auth_user: ''
prometheus_enable_basic_auth: false
prometheus_endpoint: http://localhost:9091
secret_key: aCtmgbcUqYUy_HNVg5BDXCaeJgJQzHJXwqbXr0Nmb2o
session_name: session
ssl_enabled: true
openstack:
base_domains:
- heat_user_domain
default_region: RegionOne
enforce_new_defaults: true
extension_mapping:
floating-ip-port-forwarding: neutron_port_forwarding
fwaas_v2: neutron_firewall
qos: neutron_qos
vpnaas: neutron_vpn
interface_type: public
keystone_url: http://192.168.0.88:5000/v3/
nginx_prefix: /api/openstack
reclaim_instance_interval: 604800
service_mapping:
baremetal: ironic
compute: nova
container: zun
container-infra: magnum
database: trove
dns: designate
identity: keystone
image: glance
instance-ha: masakari
key-manager: barbican
load-balancer: octavia
network: neutron
object-store: swift
orchestration: heat
placement: placement
sharev2: manilav2
volumev3: cinder
sso_enabled: false
sso_protocols:
- openid
sso_region: RegionOne
system_admin_roles:
- admin
- system_admin
system_project: service
system_project_domain: Default
system_reader_roles:
- system_reader
system_user_domain: Default
system_user_name: skyline
system_user_password: '000000'
setting:
base_settings:
- flavor_families
- gpu_models
- usb_models
flavor_families:
- architecture: x86_architecture
categories:
- name: general_purpose
properties: []
- name: compute_optimized
properties: []
- name: memory_optimized
properties: []
- name: high_clock_speed
properties: []
- architecture: heterogeneous_computing
categories:
- name: compute_optimized_type_with_gpu
properties: []
- name: visualization_compute_optimized_type_with_gpu
properties: []
gpu_models:
- nvidia_t4
usb_models:
- usb_c
# Adjust the relevant settings in /etc/skyline/skyline.yaml as shown above
root@ubuntu:~# cd ${HOME}/skyline-apiserver/
root@ubuntu:~/skyline-apiserver# make db_sync
# Populate the Skyline APIServer database
root@ubuntu:~/skyline-apiserver# cat > /etc/systemd/system/skyline-apiserver.service <<EOF
[Unit]
Description=Skyline APIServer
[Service]
Type=simple
ExecStart=/usr/local/bin/gunicorn -c /etc/skyline/gunicorn.py skyline_apiserver.main:app
LimitNOFILE=32768
[Install]
WantedBy=multi-user.target
EOF
# Create the systemd unit for the service
root@ubuntu:~/skyline-apiserver# sudo systemctl daemon-reload
root@ubuntu:~/skyline-apiserver# sudo systemctl enable skyline-apiserver
Created symlink /etc/systemd/system/multi-user.target.wants/skyline-apiserver.service → /etc/systemd/system/skyline-apiserver.service.
root@ubuntu:~/skyline-apiserver# sudo systemctl start skyline-apiserver
# Start the service and enable it at boot
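gunicorn should now be listening on 0.0.0.0:28000 as configured in /etc/skyline/gunicorn.py; a quick check:
root@ubuntu:~/skyline-apiserver# systemctl status skyline-apiserver --no-pager
root@ubuntu:~/skyline-apiserver# ss -tlnp | grep 28000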
skyline-console source deployment
root@ubuntu:~# wget -P /root/ --tries=10 --retry-connrefused --waitretry=60 --no-dns-cache --no-cache https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh
root@ubuntu:~# bash /root/install.sh
root@ubuntu:~# . /root/.nvm/nvm.sh
# Install nvm
root@ubuntu:~# nvm install --lts=Erbium
root@ubuntu:~# nvm alias default lts/erbium
root@ubuntu:~# nvm use default
Now using node v12.22.12 (npm v6.14.16)
# Install Node.js
root@ubuntu:~# npm install -g yarn
# Install yarn
root@ubuntu:~# git clone https://opendev.org/openstack/skyline-console.git
# Download the source code
root@ubuntu:~# cd skyline-console/
root@ubuntu:~/skyline-console# ln -s /usr/bin/python3 /usr/bin/python
# Either change python to python3 throughout the Makefile (otherwise make will fail), or simply create a python symlink as above
root@ubuntu:~/skyline-console# make package
root@ubuntu:~/skyline-console# pip3 install --force-reinstall dist/skyline_console-*.whl -i https://mirrors.aliyun.com/pypi/simple/
# Install the built wheel and its dependencies
root@ubuntu:~/skyline-console# apt install nginx -y
root@ubuntu:~/skyline-console# skyline-nginx-generator -o /etc/nginx/nginx.conf
root@ubuntu:~/skyline-console# sudo sed -i "s/server .* fail_timeout=0;/server 0.0.0.0:28000 fail_timeout=0;/g" /etc/nginx/nginx.conf
# Generate the nginx configuration file and point the upstream at the API server on port 28000
root@ubuntu:~/skyline-console# cd
root@ubuntu:~# systemctl enable --now nginx
Synchronizing state of nginx.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nginx
root@ubuntu:~# systemctl restart nginx
# Start the service and enable it at boot
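The generated nginx configuration normally listens on port 9999 (check the listen directive in /etc/nginx/nginx.conf if unsure); the dashboard should then respond and accept the skyline keystone credentials:
root@ubuntu:~# curl -I http://192.168.0.88:9999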
skyline-console deployment - containerized (has issues, not yet resolved; containerized deployment is not used for now)
# Add Docker's official GPG key:
sudo apt-get update
sudo apt-get install ca-certificates curl
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
# Add the repository to Apt sources:
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
# Note: run the commands above one at a time, otherwise they may fail
root@ubuntu:~# sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Install Docker
root@ubuntu:~# mkdir /etc/docker -p
root@ubuntu:~# sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://32yzbld0.mirror.aliyuncs.com"]
}
EOF
# Configure a registry mirror
root@ubuntu:~# systemctl daemon-reload
root@ubuntu:~# systemctl restart docker
root@ubuntu:~# systemctl enable --now docker
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
# Restart the service and enable it at boot
root@ubuntu:~# sudo docker run hello-world
...
# Verify that Docker works
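Optionally, confirm that the registry mirror configured above was picked up:
root@ubuntu:~# docker info | grep -A1 "Registry Mirrors"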
root@ubuntu:~# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0f9bef890236 99cloud/skyline:latest "start_service.sh" 8 seconds ago Exited (0) 7 seconds ago skyline_bootstrap
ee5200873960 hello-world "/hello" About a minute ago Exited (0) About a minute ago jolly_hypatia
# Make sure the bootstrap container in docker ps -a exited with code 0; if not, check the container logs to troubleshoot
root@ubuntu:~# docker rm -f skyline_bootstrap
skyline_bootstrap
# Clean up the bootstrap container
root@ubuntu:~# sudo docker run -d --name skyline --restart=always -v /etc/skyline/skyline.yaml:/etc/skyline/skyline.yaml -v /var/log:/var/log --net=host 99cloud/skyline:latest
# Run skyline-apiserver in a container
5-ceph deployment
root@ubuntu:~# apt install -y cephadm
root@ubuntu:~# grep -Ev "^$|#" /etc/lvm/lvm.conf |grep "devices {" -A10
devices {
dir = "/dev"
scan = [ "/dev" ]
obtain_device_list_from_udev = 1
external_device_info_source = "none"
filter = [ "a/sdb1/", "a/sdd/", "r/.*/"]
# Note: adjust the filter line above for your environment; here the OSD disk is sdd
sysfs_scan = 1
scan_lvs = 0
multipath_component_detection = 1
md_component_detection = 1
root@ubuntu:~# cephadm bootstrap --mon-ip 192.168.0.88
...
Ceph Dashboard is now available at:
URL: https://ubuntu:8443/
User: admin
Password: 57sd618x5t
# Access the dashboard in a browser using the URL and credentials above
root@ubuntu:~# apt install -y ceph-common
# Install the ceph command-line client
# Because Ceph is deployed in containers, the containers must be restarted after configuration file changes
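Before creating pools, it is worth confirming that the cluster is healthy and that the OSD devices are visible (OSDs must already have been added, for example through the dashboard or ceph orch):
root@ubuntu:~# ceph -s
root@ubuntu:~# ceph orch device ls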
Integrating ceph with openstack
root@ubuntu:~# ceph osd pool create volumes
pool 'volumes' created
root@ubuntu:~# ceph osd pool create images
pool 'images' created
root@ubuntu:~# ceph osd pool create backups
pool 'backups' created
root@ubuntu:~# ceph osd pool create vms
pool 'vms' created
# Create pools for Cinder (volumes, vms, backups) and for Glance (images)
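The Ceph/OpenStack integration guide also recommends initializing newly created pools for RBD use before clients write to them:
root@ubuntu:~# rbd pool init volumes
root@ubuntu:~# rbd pool init images
root@ubuntu:~# rbd pool init backups
root@ubuntu:~# rbd pool init vms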
root@ubuntu:~# ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images' mgr 'profile rbd pool=images'
[client.glance]
key = AQB6MEdmv20qMBAAMYpOIWvD+g2sxAsn1FzE9A==
root@ubuntu:~# ceph auth get-or-create client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=vms'
[client.cinder]
key = AQB7MEdmsjk5AxAAa5usrYsj4Ge7DghCQvo5Xg==
root@ubuntu:~# ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups' mgr 'profile rbd pool=backups'
[client.cinder-backup]
key = AQB7MEdmxa/4ERAA2cNKeN1kXn9ds/kAkEN8gg==
root@ubuntu:~# ceph auth get-or-create client.glance
[client.glance]
key = AQB6MEdmv20qMBAAMYpOIWvD+g2sxAsn1FzE9A==
root@ubuntu:~# ceph auth get-or-create client.cinder
[client.cinder]
key = AQB7MEdmsjk5AxAAa5usrYsj4Ge7DghCQvo5Xg==
root@ubuntu:~# ceph auth get-or-create client.cinder-backup
[client.cinder-backup]
key = AQB7MEdmxa/4ERAA2cNKeN1kXn9ds/kAkEN8gg==
# If cephx authentication is enabled, create new users for Nova/Cinder and Glance
ceph auth get-or-create client.glance | ssh 192.168.0.88 sudo tee /etc/ceph/ceph.client.glance.keyring
ssh 192.168.0.88 sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh 192.168.0.88 sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh 192.168.0.88 sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder-backup | ssh 192.168.0.88 sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
ssh 192.168.0.88 sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
# Add the keyrings for client.glance, client.cinder, and client.cinder-backup to the appropriate nodes and change their ownership
root@ubuntu:~# ceph auth get-or-create client.cinder | ssh 192.168.0.88 sudo tee /etc/ceph/ceph.client.cinder.keyring
[client.cinder]
key = AQB7MEdmsjk5AxAAa5usrYsj4Ge7DghCQvo5Xg==
# Nodes running nova-compute need the keyring file for the nova-compute process
root@ubuntu:~# ceph auth get-key client.cinder | ssh 192.168.0.88 tee client.cinder.key
AQB7MEdmsjk5AxAAa5usrYsj4Ge7DghCQvo5Xg==
# They also need to store the secret key of the client.cinder user in libvirt; the libvirt process needs it to access the cluster while attaching block devices from Cinder
# Create a temporary copy of the secret key on the node running nova-compute
root@ubuntu:~# uuidgen
71a49f7b-b732-47dc-8769-8589e53a7ed0
root@ubuntu:~# cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>71a49f7b-b732-47dc-8769-8589e53a7ed0</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
root@ubuntu:~# sudo virsh secret-define --file secret.xml
Secret 71a49f7b-b732-47dc-8769-8589e53a7ed0 created
root@ubuntu:~# sudo virsh secret-set-value --secret 71a49f7b-b732-47dc-8769-8589e53a7ed0 --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml
error: Passing secret value as command-line argument is insecure!
Secret value set
# Then, on the compute node, add the secret to libvirt and remove the temporary copy of the key
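The secret can be confirmed with the command below; the UUID 71a49f7b-b732-47dc-8769-8589e53a7ed0 should appear in the list:
root@ubuntu:~# virsh secret-list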
root@ubuntu:~# cat >> /etc/glance/glance-api.conf <<EOF
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
EOF
# Edit /etc/glance/glance-api.conf and add the above under the [glance_store] section; comment out the other existing content in that section
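For the new store to take effect, glance-api has to be restarted; after uploading a test image, the images pool should contain the corresponding RBD object:
root@ubuntu:~# systemctl restart glance-api
root@ubuntu:~# rbd ls images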
root@ubuntu:~# grep -Ev "^$|#" /etc/cinder/cinder.conf
[DEFAULT]
my_ip = 192.168.0.88
transport_url = rabbit://openstack:000000@controller
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = lioadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
enabled_backends = lvm,ceph
glance_api_version = 2
[database]
connection = mysql+pymysql://cinder:000000@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 000000
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = 71a49f7b-b732-47dc-8769-8589e53a7ed0
# Configure Cinder
# Note: when configuring multiple cinder backends, glance_api_version = 2 must be in the [DEFAULT] section
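For the ceph backend to be picked up, the volume services need a restart; the service list should then show a cinder-volume backend for ceph in the up state alongside lvm:
root@ubuntu:~# systemctl restart cinder-scheduler cinder-volume
root@ubuntu:~# openstack volume service list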
root@ubuntu:~# cat >> /etc/cinder/cinder.conf <<EOF
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
EOF
# Configure Cinder backup (note: these backup_* options belong in the [DEFAULT] section of cinder.conf; appending them at the end of the file places them under [ceph], so move them if necessary)
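cinder-backup then needs a restart to pick up the Ceph backup settings:
root@ubuntu:~# systemctl restart cinder-backup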
# Configure Nova