I try to use options of the "nmcli" command.
Options "-t" and "-f"
ex. 1
[root@localhost ~]# nmcli -f ipv4.addresses c s enp0s3
ipv4.addresses: 172.16.10.111/24
[root@localhost ~]#
ex. 2
[root@localhost ~]# nmcli -t -f ipv4.addresses c s enp0s3
ipv4.addresses:172.16.10.111/24
[root@localhost ~]#
ex. 3
[root@localhost ~]# nmcli -t -f ipv4 c s enp0s3
ipv4.method:manual
ipv4.dns:172.16.10.90
ipv4.dns-search:
ipv4.dns-options:
ipv4.dns-priority:0
ipv4.addresses:172.16.10.111/24
ipv4.gateway:172.16.10.1
ipv4.routes:
ipv4.route-metric:-1
ipv4.route-table:0
ipv4.ignore-auto-routes:no
ipv4.ignore-auto-dns:no
ipv4.dhcp-client-id:
ipv4.dhcp-timeout:0
ipv4.dhcp-send-hostname:yes
ipv4.dhcp-hostname:
ipv4.dhcp-fqdn:
ipv4.never-default:no
ipv4.may-fail:yes
ipv4.dad-timeout:-1
[root@localhost ~]#
option "-g"
ex. 1
[root@localhost ~]# nmcli -g connection.id c s enp0s3
enp0s3
[root@localhost ~]#
ex. 2
[root@localhost ~]# nmcli -g ipv4.addresses c s enp0s3
172.16.10.111/24
[root@localhost ~]#
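The "-g" output is convenient in shell scripts. The following is a minimal sketch (my own addition, assuming the same connection name enp0s3 as above) that stores the address in a variable and strips the prefix length:

# store the address and strip the /24 prefix length
ADDR=$(nmcli -g ipv4.addresses c s enp0s3)
echo "address with prefix:    ${ADDR}"
echo "address without prefix: ${ADDR%/*}"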
systemd configuration for coredns
I refer to the page below.
https://github.com/coredns/deployment/blob/master/systemd/coredns.service
add coredns user
[root@cent7devops ~]# useradd coredns -s /sbin/nologin -c 'coredns user'
create service file for systemd
[root@cent7devops ~]# ls -l /etc/systemd/system/coredns.service
-rw-r--r-- 1 root root 464 Aug 16 08:27 /etc/systemd/system/coredns.service
[root@cent7devops ~]#
[root@cent7devops ~]# cat /etc/systemd/system/coredns.service
[Unit]
Description=CoreDNS DNS server
Documentation=https://coredns.io
After=network.target

[Service]
PermissionsStartOnly=true
LimitNOFILE=1048576
LimitNPROC=512
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_BIND_SERVICE
NoNewPrivileges=true
User=coredns
WorkingDirectory=/home/coredns
ExecStart=/usr/bin/coredns -conf=/etc/coredns/Corefile
ExecReload=/bin/kill -SIGUSR1 $MAINPID
Restart=on-failure

[Install]
WantedBy=multi-user.target
[root@cent7devops ~]#
start coredns service
check status
[root@cent7devops ~]# systemctl is-active coredns
unknown
[root@cent7devops ~]#
start coredns service
[root@cent7devops ~]# systemctl start coredns
[root@cent7devops ~]# systemctl is-active coredns
active
[root@cent7devops ~]#
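The session above only starts the service manually. Though not shown in the original session, after placing a new unit file it is safe to reload systemd, and you will usually want the service enabled at boot as well:

# pick up the new/changed unit file
systemctl daemon-reload
# start automatically at boot from now on
systemctl enable coredns
# check the result
systemctl status coredns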
use Ansible vault.
about ansible version
check version.
# ansible --version
ansible 2.6.1
  config file = /etc/ansible/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/site-packages/ansible
  executable location = /usr/bin/ansible
  python version = 2.7.5 (default, Jul 13 2018, 13:06:57) [GCC 4.8.5 20150623 (Red Hat 4.8.5-28)]
#
use ansible-vault.
This is the inventory file.
# cat hosts
[test]
node01 ansible_host=172.16.10.101 ansible_user=root
#
I encrypt the host vars file.
Before:
# cat host_vars/node01.yml
---
ansible_ssh_pass: <pass>
#
I use the ansible-vault command to encrypt this "Host Variables" file.
# ansible-vault encrypt host_vars/node01.yml
New Vault password:
Confirm New Vault password:
Encryption successful
#
After:
# cat host_vars/node01.yml
$ANSIBLE_VAULT;1.1;AES256
*snip*
#
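After encryption, a plain cat only shows the ciphertext. The encrypted file can still be inspected or modified with the standard ansible-vault subcommands (not shown in the original session):

# show the decrypted content (prompts for the vault password)
ansible-vault view host_vars/node01.yml
# open the decrypted content in an editor and re-encrypt on save
ansible-vault edit host_vars/node01.yml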
After I encrypted the host vars file with ansible-vault, I execute ansible-playbook without the --ask-vault-pass option.
The run fails with an error.
# ansible-playbook -i hosts access.yml

PLAY [test] ****************************************************************************************
ERROR! Attempting to decrypt but no vault secrets found
#
So I execute ansible-playbook with the --ask-vault-pass option.
# ansible-playbook -i hosts access.yml --ask-vault-pass
Vault password:

PLAY [test] ****************************************************************************************

TASK [Gathering Facts] *****************************************************************************
ok: [node01]

TASK [Execute hostname] ****************************************************************************
changed: [node01]

TASK [Execute id] **********************************************************************************
changed: [node01]

TASK [Execute date] ********************************************************************************
changed: [node01]

PLAY RECAP *****************************************************************************************
node01 : ok=4 changed=3 unreachable=0 failed=0
#
I can also use the --vault-password-file option.
# vi vault.txt
# ansible-playbook -i hosts access.yml --vault-password-file=./vault.txt

PLAY [test] ****************************************************************************************
*snip*

PLAY RECAP *****************************************************************************************
node01 : ok=4 changed=3 unreachable=0 failed=0
#
Or I can use the --vault-id option.
# ansible-playbook -i hosts access.yml --vault-id vault.txt

PLAY [test] ****************************************************************************************
*snip*

PLAY RECAP *****************************************************************************************
node01 : ok=4 changed=3 unreachable=0 failed=0
#
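To avoid passing an option on every run, the password file location can also be configured once. This is a sketch of two equivalent ways I know from the Ansible documentation; ./vault.txt is simply the file from the session above and should be kept out of version control:

# option 1: environment variable
export ANSIBLE_VAULT_PASSWORD_FILE=./vault.txt
ansible-playbook -i hosts access.yml

# option 2: ansible.cfg, [defaults] section
#   vault_password_file = ./vault.txt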
I'd like to use ceph dashboard.
Before reading this page, please check this post.
I'd like to use CephFS on CentOS 7. - AKAI TSUKI
ceph dashboard
I'd like to use ceph dashboard.
http://docs.ceph.com/docs/mimic/mgr/dashboard/
[cuser@ceph01 ~]$ sudo ceph mgr module enable dashboard
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph mgr module ls
{
"enabled_modules": [
"balancer",
"dashboard",
"iostat",
"restful",
"status"
],
"disabled_modules": [
{
"name": "hello",
"can_run": true,
"error_string": ""
},
{
"name": "influx",
"can_run": false,
"error_string": "influxdb python module not found"
},
{
"name": "localpool",
"can_run": true,
"error_string": ""
},
{
"name": "prometheus",
"can_run": true,
"error_string": ""
},
{
"name": "selftest",
"can_run": true,
"error_string": ""
},
{
"name": "smart",
"can_run": true,
"error_string": ""
},
{
"name": "telegraf",
"can_run": true,
"error_string": ""
},
{
"name": "telemetry",
"can_run": true,
"error_string": ""
},
{
"name": "zabbix",
"can_run": true,
"error_string": ""
}
]
}
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph dashboard create-self-signed-cert
Self-signed certificate created
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph dashboard set-login-credentials test <pass>
Username and password updated
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph mgr services
{
"dashboard": "https://172.16.10.111:8080/"
}
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph config dump
WHO MASK LEVEL OPTION VALUE RO
mgr unknown mgr/dashboard/password <aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa> *
mgr unknown mgr/dashboard/username test *
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph mgr services
{
"dashboard": "https://ceph01:8080/"
}
[cuser@ceph01 ~]$
Now I can access the ceph dashboard at "https://ceph01:8080/".
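If you want the dashboard to listen on a specific address or port instead of the one shown above, the following manager options can be set. This is a sketch based on the mimic dashboard documentation; 8443 is only an example value, not something used in my session:

# bind address/port for the dashboard (example values)
sudo ceph config set mgr mgr/dashboard/server_addr 172.16.10.111
sudo ceph config set mgr mgr/dashboard/server_port 8443
# restart the module so the new settings take effect
sudo ceph mgr module disable dashboard
sudo ceph mgr module enable dashboard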
I'd like to use CephFS on CentOS 7.
Before reading this page, please check these posts.
Try to install Ceph in CentOS 7 referencing "STORAGE CLUSTER QUICK START" - AKAI TSUKI
Install Ceph in CentOS 7. - AKAI TSUKI
I'd like to use CephFS.
http://docs.ceph.com/docs/master/cephfs/createfs/
I create pools.
Regarding pg_num, see the URL below.
http://docs.ceph.com/docs/master/rados/operations/placement-groups/
[cuser@ceph01 ~]$ sudo ceph osd pool create cephfs_data 128
pool 'cephfs_data' created
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph osd pool create cephfs_metadata 128
Error ERANGE: pg_num 128 size 3 would mean 768 total pgs, which exceeds max 600 (mon_max_pg_per_osd 200 * num_in_osds 3)
[cuser@ceph01 ~]$
An error occurred: with replication size 3, the existing 128 PGs of cephfs_data plus another 128 PGs would mean (128 + 128) * 3 = 768 placement groups, above the limit of 600 (mon_max_pg_per_osd 200 * 3 OSDs). So I lower the pg_num value for cephfs_metadata to 64, which gives (128 + 64) * 3 = 576.
[cuser@ceph01 ~]$ sudo ceph osd pool create cephfs_metadata 64
pool 'cephfs_metadata' created
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph fs new testfs cephfs_metadata cephfs_data
new fs with metadata pool 2 and data pool 1
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph fs ls
name: testfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[cuser@ceph01 ~]$
I check status.
[cuser@ceph01 ~]$ sudo ceph osd stat
3 osds: 3 up, 3 in; epoch: e22
[cuser@ceph01 ~]$ sudo ceph osd status
+----+--------+-------+-------+--------+---------+--------+---------+-----------+
| id | host | used | avail | wr ops | wr data | rd ops | rd data | state |
+----+--------+-------+-------+--------+---------+--------+---------+-----------+
| 0 | ceph01 | 1027M | 14.9G | 0 | 0 | 0 | 0 | exists,up |
| 1 | ceph02 | 1027M | 14.9G | 0 | 0 | 0 | 0 | exists,up |
| 2 | ceph03 | 1027M | 14.9G | 0 | 0 | 0 | 0 | exists,up |
+----+--------+-------+-------+--------+---------+--------+---------+-----------+
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph osd versions
{
"ceph version 13.2.0 (79a10589f1f80dfe21e8f9794365ed98143071c4) mimic (stable)": 3
}
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph osd pool ls detail
pool 1 'cephfs_data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 128 pgp_num 128 last_change 22 flags hashpspool stripe_width 0 application cephfs
pool 2 'cephfs_metadata' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 22 flags hashpspool stripe_width 0 application cephfs
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo ceph mds stat
testfs-1/1/1 up {0=ceph01=up:active}
[cuser@ceph01 ~]$
[cuser@ceph01 ~]$ sudo cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = *snip*
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
[cuser@ceph01 ~]$
Client Side
I set up the client to use CephFS.
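The mount below reads the admin key from /root/admin.secret. How this file was created is not shown; a sketch of the usual way, assuming the key is the one from the ceph.client.admin.keyring shown earlier, is:

# on a cluster node: print only the admin key
sudo ceph auth get-key client.admin
# on the client: put that single key string into the secret file
vi /root/admin.secret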
[root@ceph05 ~]# chmod 600 admin.secret
[root@ceph05 ~]# ls -l
total 8
-rw------- 1 root root 41 Jul 15 20:10 admin.secret
-rw-------. 1 root root 1329 Jul 14 21:50 anaconda-ks.cfg
[root@ceph05 ~]# mkdir /mnt/mycephfs
[root@ceph05 ~]# ls -l /mnt/mycephfs/
total 0
[root@ceph05 ~]#
[root@ceph05 ~]# sudo mount -t ceph ceph01:/ /mnt/mycephfs -o name=admin,secretfile=/root/admin.secret
[root@ceph05 ~]#
[root@ceph05 ~]# df -hT
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root xfs        28G  1.5G   27G   6% /
devtmpfs                devtmpfs  1.9G     0  1.9G   0% /dev
tmpfs                   tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs                   tmpfs     1.9G  8.5M  1.9G   1% /run
tmpfs                   tmpfs     1.9G     0  1.9G   0% /sys/fs/cgroup
/dev/sda1               xfs      1014M  171M  844M  17% /boot
tmpfs                   tmpfs     380M     0  380M   0% /run/user/0
172.16.10.111:/         ceph       15G     0   15G   0% /mnt/mycephfs
[root@ceph05 ~]#
I create a file on the shared CephFS storage.
[root@ceph05 ~]# ls -l /mnt/mycephfs/
total 0
[root@ceph05 ~]# vi /mnt/mycephfs/test.txt
[root@ceph05 ~]# cat /mnt/mycephfs/test.txt
message
[root@ceph05 ~]#
After I unmount, I cannot see the created file.
[root@ceph05 ~]# umount /mnt/mycephfs
[root@ceph05 ~]# df -hT
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root xfs        28G  1.5G   27G   6% /
devtmpfs                devtmpfs  1.9G     0  1.9G   0% /dev
tmpfs                   tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs                   tmpfs     1.9G  8.5M  1.9G   1% /run
tmpfs                   tmpfs     1.9G     0  1.9G   0% /sys/fs/cgroup
/dev/sda1               xfs      1014M  171M  844M  17% /boot
tmpfs                   tmpfs     380M     0  380M   0% /run/user/0
[root@ceph05 ~]#
[root@ceph05 ~]# ls -l /mnt/mycephfs/
total 0
[root@ceph05 ~]#
I mount again.
[root@ceph05 ~]# sudo mount -t ceph ceph01:/ /mnt/mycephfs -o name=admin,secretfile=/root/admin.secret
[root@ceph05 ~]#
[root@ceph05 ~]# df -hT
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root xfs        28G  1.5G   27G   6% /
devtmpfs                devtmpfs  1.9G     0  1.9G   0% /dev
tmpfs                   tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs                   tmpfs     1.9G  8.5M  1.9G   1% /run
tmpfs                   tmpfs     1.9G     0  1.9G   0% /sys/fs/cgroup
/dev/sda1               xfs      1014M  171M  844M  17% /boot
tmpfs                   tmpfs     380M     0  380M   0% /run/user/0
172.16.10.111:/         ceph       15G     0   15G   0% /mnt/mycephfs
[root@ceph05 ~]#
[root@ceph05 ~]# ls -l /mnt/mycephfs/
total 1
-rw-r--r-- 1 root root 8 Jul 15 20:14 test.txt
[root@ceph05 ~]# cat /mnt/mycephfs/test.txt
message
[root@ceph05 ~]#
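To make this mount persistent across reboots, an /etc/fstab entry can be added on the client. This is a sketch based on the CephFS mount documentation, reusing the paths from the session above; the _netdev option delays the mount until the network is up:

# /etc/fstab entry on ceph05 (one line)
ceph01:/  /mnt/mycephfs  ceph  name=admin,secretfile=/root/admin.secret,noatime,_netdev  0  2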
And I confirm the status on the cluster side.
[cuser@ceph01 ~]$ sudo ceph fs status
testfs - 1 clients
======
+------+--------+--------+---------------+-------+-------+
| Rank | State  |  MDS   |    Activity   |  dns  |  inos |
+------+--------+--------+---------------+-------+-------+
|  0   | active | ceph01 | Reqs:    0 /s |   13  |   14  |
+------+--------+--------+---------------+-------+-------+
+-----------------+----------+-------+-------+
|       Pool      |   type   |  used | avail |
+-----------------+----------+-------+-------+
| cephfs_metadata | metadata | 26.2k | 14.1G |
|   cephfs_data   |   data   |    8  | 14.1G |
+-----------------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
+-------------+
MDS version: ceph version 13.2.0 (79a10589f1f80dfe21e8f9794365ed98143071c4) mimic (stable)
[cuser@ceph01 ~]$
Try to install Ceph in CentOS 7 referencing "STORAGE CLUSTER QUICK START"
Before you read this page, please check the page below.
Install Ceph in CentOS 7. - AKAI TSUKI
STORAGE CLUSTER QUICK START
Please check this url.
http://docs.ceph.com/docs/master/start/quick-ceph-deploy/
Install the ceph-deploy tool.
[root@ceph04 ~]# yum install ceph-deploy
When I execute the ceph-deploy command, an "ImportError" occurs.
[root@ceph04 ~]$ mkdir my-cluster
[root@ceph04 ~]$ cd my-cluster
[cuser@ceph04 my-cluster]$ ceph-deploy --help
Traceback (most recent call last):
File "/usr/bin/ceph-deploy", line 18, in <module>
from ceph_deploy.cli import main
File "/usr/lib/python2.7/site-packages/ceph_deploy/cli.py", line 1, in <module>
import pkg_resources
ImportError: No module named pkg_resources
[cuser@ceph04 my-cluster]$
So I install python2-pip.
[cuser@ceph04 my-cluster]$ sudo yum -y install python2-pip
Then I can execute ceph-deploy.
I specify ceph01, one of the cluster nodes, as the initial monitor node in the command parameter.
[cuser@ceph04 my-cluster]$ ceph-deploy new ceph01
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cuser/.cephdeploy.conf
*snip*
[cuser@ceph04 my-cluster]$
I confirm the created files.
[cuser@ceph04 my-cluster]$ ls -1
ceph.conf
ceph-deploy-ceph.log
ceph.mon.keyring
[cuser@ceph04 my-cluster]$
I install ceph on the cluster nodes (ceph01, ceph02, ceph03).
[cuser@ceph04 my-cluster]$ ceph-deploy install ceph01 ceph02 ceph03
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cuser/.cephdeploy.conf
*snip*
[ceph03][DEBUG ] Complete!
[ceph03][INFO ] Running command: sudo ceph --version
[ceph03][DEBUG ] ceph version 13.2.0 (79a10589f1f80dfe21e8f9794365ed98143071c4) mimic (stable)
[cuser@ceph04 my-cluster]$
[cuser@ceph04 my-cluster]$ ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cuser/.cephdeploy.conf
*snip*
[cuser@ceph04 my-cluster]$
http://docs.ceph.com/docs/master/start/quick-ceph-deploy/#create-a-cluster
This page shows a ceph.bootstrap-rbd.keyring file in the local directory.
But in my case the ceph.bootstrap-rbd.keyring file does not exist.
[cuser@ceph04 my-cluster]$ ls -1
ceph.bootstrap-mds.keyring
ceph.bootstrap-mgr.keyring
ceph.bootstrap-osd.keyring
ceph.bootstrap-rgw.keyring
ceph.client.admin.keyring
ceph.conf
ceph.conf.org
ceph-deploy-ceph.log
ceph.mon.keyring
[cuser@ceph04 my-cluster]$
[cuser@ceph04 my-cluster]$ ceph-deploy admin ceph01 ceph02 ceph03
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cuser/.cephdeploy.conf
*snip*
[cuser@ceph04 my-cluster]$
Deploy a manager daemon to ceph01.
[cuser@ceph04 my-cluster]$ ceph-deploy mgr create ceph01
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cuser/.cephdeploy.conf
*snip*
[cuser@ceph04 my-cluster]$
I specify the unused disk /dev/sdb on each node.
[cuser@ceph04 my-cluster]$ ceph-deploy osd create --data /dev/sdb ceph01
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cuser/.cephdeploy.conf
*snip*
[ceph_deploy.osd][DEBUG ] Host ceph01 is now ready for osd use.
[cuser@ceph04 my-cluster]$
[cuser@ceph04 my-cluster]$ ceph-deploy osd create --data /dev/sdb ceph02
[cuser@ceph04 my-cluster]$ ceph-deploy osd create --data /dev/sdb ceph03
I confirm the status and get a "HEALTH_OK" message.
[cuser@ceph04 my-cluster]$ ssh ceph01 sudo ceph health
HEALTH_OK
[cuser@ceph04 my-cluster]$
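For more detail than ceph health, the usual status commands can be run from the ceph-deploy node in the same way (output omitted here; this is my addition, not part of the original session):

# overall cluster status: monitors, mgr, OSDs, pools
ssh ceph01 sudo ceph -s
# OSD layout per host
ssh ceph01 sudo ceph osd tree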
Install Ceph in CentOS 7.
Preparation
PREFLIGHT CHECKLIST
http://docs.ceph.com/docs/master/start/quick-start-preflight/
I confirmed this url.
I use these nodes.
| node | role | ip |
|---|---|---|
| ceph01 | mon.node1/osd.0 | 172.16.10.111/24 |
| ceph02 | osd.1 | 172.16.10.112/24 |
| ceph03 | osd.2 | 172.16.10.113/24 |
| ceph04 | ceph-deploy | 172.16.10.114/24 |
| ceph05 | client | 172.16.10.115/24 |
In the following steps, I prepared the same server settings on all nodes.
The output below shows only a part of the execution.
Install the EPEL repository.
[root@ceph04 ~]# yum install epel-release
[root@ceph04 ~]# yum update
Set up NTP (chrony).
[root@ceph03 ~]# yum install chrony
[root@ceph03 ~]# vi /etc/chrony.conf
[root@ceph03 ~]# systemctl start chronyd
[root@ceph03 ~]# systemctl enable chronyd

[root@ceph04 ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* ntp-a2.nict.go.jp             1   6    77    25   +165us[+2840us] +/- 5197us
[root@ceph04 ~]#
Check SELinux.
[root@ceph04 ~]# getenforce
Disabled
[root@ceph04 ~]#
Stop (disable) the firewall. If you need to keep it enabled, you should open the required ports instead.
http://docs.ceph.com/docs/master/start/quick-start-preflight/#open-required-ports
[root@ceph04 ~]# systemctl stop firewalld
[root@ceph04 ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@ceph04 ~]#
[root@ceph04 ~]# iptables -nL
Chain INPUT (policy ACCEPT)
target     prot opt source               destination

Chain FORWARD (policy ACCEPT)
target     prot opt source               destination

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination
[root@ceph04 ~]#
At this time, I don't have DNS, so I use the hosts file.
[root@ceph04 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.10.111 ceph01
172.16.10.112 ceph02
172.16.10.113 ceph03
172.16.10.114 ceph04
172.16.10.115 ceph05
[root@ceph04 ~]#
I use the cuser account for passwordless SSH access.
http://docs.ceph.com/docs/master/start/quick-start-preflight/#create-a-ceph-deploy-user
[root@ceph04 ~]# useradd cuser
[root@ceph04 ~]# passwd cuser
After switching to cuser:
[cuser@ceph04 ~]$ ssh-keygen -t rsa -b 2048 -N ""
[cuser@ceph04 ~]$ ssh-copy-id cuser@ceph01
[cuser@ceph04 ~]$ ssh-copy-id cuser@ceph02
[cuser@ceph04 ~]$ ssh-copy-id cuser@ceph03
[cuser@ceph04 ~]$ ssh-copy-id cuser@ceph04
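The preflight checklist also recommends a ~/.ssh/config on the ceph-deploy node so that ceph-deploy logs in to each node as cuser without a --username option. A minimal sketch for the nodes above (not shown in my original notes):

# ~/.ssh/config on ceph04 (chmod 600)
Host ceph01
    Hostname ceph01
    User cuser
Host ceph02
    Hostname ceph02
    User cuser
Host ceph03
    Hostname ceph03
    User cuser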
Set up passwordless sudo for cuser.
[root@ceph04 ~]# echo "cuser ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cuser
cuser ALL = (root) NOPASSWD:ALL
[root@ceph04 ~]# sudo chmod 0440 /etc/sudoers.d/cuser
[root@ceph04 ~]#
[root@ceph04 ~]# cat << EOM > /etc/yum.repos.d/ceph.repo
> [ceph-noarch]
> name=Ceph noarch packages
> baseurl=http://download.ceph.com/rpm-mimic/el7/noarch
> enabled=1
> gpgcheck=1
> type=rpm-md
> gpgkey=https://download.ceph.com/keys/release.asc
> EOM
[root@ceph04 ~]#
But instead of creating the repository configuration file,
I just need to install ceph-release as follows:
[root@ceph04 ~]# yum install ceph-release
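Whichever way the repository is set up, it can be confirmed before installing ceph-deploy. This check is my addition and was not part of the original notes:

# confirm that the Ceph repository is visible to yum
yum repolist | grep -i ceph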