AKAI TSUKI

System development or Technical something

try nginx for reverse proxy

try Nginx

ref. https://serverfault.com/questions/760569/sent-http-variables-disappear-in-nginx-in-some-circumstances

client -> nginx -> httpd

apache httpd

docker images

[root@vm01 work_a]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
nginx               1.17.8              2073e0bcb60e        2 weeks ago         127MB
httpd               2.4.41              c562eeace183        2 weeks ago         165MB
hello-world         latest              fce289e99eb9        13 months ago       1.84kB
[root@vm01 work_a]#

index.html (sample)

[root@vm01 work_a]# cat index.html
<!DOCTYPE html>
<html>
<head>
  <title>test title</title>
</head>
<body>
  <h1>Header</h1>
  <p>This is test message.</p>
</body>
</html>

[root@vm01 work_a]#

docker run for httpd

[root@vm01 work_a]# ls -l
total 4
-rw-r--r-- 1 root root 139 Feb 18 12:00 index.html
[root@vm01 work_a]#
[root@vm01 work_a]# docker run -dit --name my-a -v "$PWD":/usr/local/apache2/htdocs/ httpd:2.4.41
78b09dbbfafc21c5eb40e4ba25797735fa93f6e9fb6b175b6013c0a8fd468478
[root@vm01 work_a]#
[root@vm01 work_a]# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS               NAMES
78b09dbbfafc        httpd:2.4.41        "httpd-foreground"       4 seconds ago       Up 3 seconds        80/tcp              my-a
[root@vm01 work_a]#

[root@vm01 work_a]# docker inspect --format '{{ .NetworkSettings.IPAddress }}' my-a
172.17.0.2
[root@vm01 work_a]#

check by sending a request directly to the httpd container

[root@vm01 work_a]# curl -v http://172.17.0.2/
* About to connect() to 172.17.0.2 port 80 (#0)
*   Trying 172.17.0.2...
* Connected to 172.17.0.2 (172.17.0.2) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 172.17.0.2
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Tue, 18 Feb 2020 03:09:08 GMT
< Server: Apache/2.4.41 (Unix)
< Last-Modified: Tue, 18 Feb 2020 03:00:51 GMT
< ETag: "8b-59ed0e1267770"
< Accept-Ranges: bytes
< Content-Length: 139
< Content-Type: text/html
<
<!DOCTYPE html>
<html>
<head>
  <title>test title</title>
</head>
<body>
  <h1>Header</h1>
  <p>This is test message.</p>
</body>
</html>

* Connection #0 to host 172.17.0.2 left intact
[root@vm01 work_a]#

basic config

config for nginx

[root@vm01 work_n]# ls -l
total 4
-rw-r--r-- 1 root root 775 Feb 18 11:35 nginx.conf
[root@vm01 work_n]#
[root@vm01 work_n]# cat nginx.conf
user  nginx;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"'
                      'response_context "$sent_http_content_type" '
                      'etag "$sent_http_etag" ';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

#    include /etc/nginx/conf.d/*.conf;

    server {
        listen 80;

        location /apache/ {
            proxy_pass http://my-a/;
        }
    }
}

[root@vm01 work_n]#

run nginx container

[root@vm01 work_n]# docker run --rm --name my-n -v /root/work_n/nginx.conf:/etc/nginx/nginx.conf:ro -it --link my-a nginx:1.17.8

check request

I got the httpd response via nginx (reverse proxy). Because proxy_pass ends with a slash (http://my-a/), the /apache/ prefix is stripped and the upstream request becomes /index.html.
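The nginx container's address used below (172.17.0.3) can be checked from another terminal in the same way as for my-a, for example:

docker inspect --format '{{ .NetworkSettings.IPAddress }}' my-n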

[root@vm01 work_n]# curl -v http://172.17.0.3/apache/index.html
* About to connect() to 172.17.0.3 port 80 (#0)
*   Trying 172.17.0.3...
* Connected to 172.17.0.3 (172.17.0.3) port 80 (#0)
> GET /apache/index.html HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 172.17.0.3
> Accept: */*
>
< HTTP/1.1 200 OK
< Server: nginx/1.17.8
< Date: Tue, 18 Feb 2020 03:42:45 GMT
< Content-Type: text/html
< Content-Length: 139
< Connection: keep-alive
< Last-Modified: Tue, 18 Feb 2020 03:00:51 GMT
< ETag: "8b-59ed0e1267770"
< Accept-Ranges: bytes
<
<!DOCTYPE html>
<html>
<head>
  <title>test title</title>
</head>
<body>
  <h1>Header</h1>
  <p>This is test message.</p>
</body>
</html>

* Connection #0 to host 172.17.0.3 left intact
[root@vm01 work_n]#

get access log

[root@vm01 work_n]# docker run --rm --name my-n -v /root/work_n/nginx.conf:/etc/nginx/nginx.conf:ro -it --link my-a nginx:1.17.8
172.17.0.1 - - [18/Feb/2020:03:42:45 +0000] "GET /apache/index.html HTTP/1.1" 200 139 "-" "curl/7.29.0" "-"response_context "text/html" etag "\x228b-59ed0e1267770\x22"

sample case 2

config

[root@vm01 work_n]# cat nginx.conf
user  nginx;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"'
                      'response_context "$sent_http_content_type" '
                      'etag "$sent_http_etag" ';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    #include /etc/nginx/conf.d/*.conf;

    server {
        listen 80;

        location /apache/ {
            proxy_pass http://my-a/;

            set $debug_msg $sent_http_etag;

            add_header X-debug-message "this is debug message for /apache/";
        }
    }
}

[root@vm01 work_n]#

request

After docker run, I sent a request.

[root@vm01 work_n]# curl -v http://172.17.0.3/apache/index.html
* About to connect() to 172.17.0.3 port 80 (#0)
*   Trying 172.17.0.3...
* Connected to 172.17.0.3 (172.17.0.3) port 80 (#0)
> GET /apache/index.html HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 172.17.0.3
> Accept: */*
>
< HTTP/1.1 200 OK
< Server: nginx/1.17.8
< Date: Tue, 18 Feb 2020 03:56:14 GMT
< Content-Type: text/html
< Content-Length: 139
< Connection: keep-alive
< Last-Modified: Tue, 18 Feb 2020 03:00:51 GMT
< ETag: "8b-59ed0e1267770"
< Accept-Ranges: bytes
< X-debug-message: this is debug message for /apache/
<
<!DOCTYPE html>
<html>
<head>
  <title>test title</title>
</head>
<body>
  <h1>Header</h1>
  <p>This is test message.</p>
</body>
</html>

* Connection #0 to host 172.17.0.3 left intact
[root@vm01 work_n]#

log

The "etag" value is empty("-") as follows:

[root@vm01 work_n]# docker run --rm --name my-n -v /root/work_n/nginx.conf:/etc/nginx/nginx.conf:ro -it --link my-a nginx:1.17.8
172.17.0.1 - - [18/Feb/2020:03:56:14 +0000] "GET /apache/index.html HTTP/1.1" 200 139 "-" "curl/7.29.0" "-"response_context "text/html" etag "-"

sample case 3

config

[root@vm01 work_n]# cat nginx.conf
user  nginx;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"'
                      'response_context "$sent_http_content_type" '
                      'etag "$sent_http_etag" ';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    #include /etc/nginx/conf.d/*.conf;

    map $sent_http_etag $debug_msg {
        default $sent_http_etag;
    }

    server {
        listen 80;

        location /apache/ {
            proxy_pass http://my-a/;

            add_header X-debug-message "this is debug message for /apache/ $debug_msg";
        }
    }
}

[root@vm01 work_n]#

docker run & send request & check response

[root@vm01 work_n]# docker run --rm --name my-n -v /root/work_n/nginx.conf:/etc/nginx/nginx.conf:ro -it --link my-a nginx:1.17.8
172.17.0.1 - - [18/Feb/2020:04:15:07 +0000] "GET /apache/index.html HTTP/1.1" 200 139 "-" "curl/7.29.0" "-"response_context "text/html" etag "\x228b-59ed0e1267770\x22"

I got "debug_msg" from "$sent_http_etag" var.

[root@vm01 work_n]# curl -v http://172.17.0.3/apache/index.html
* About to connect() to 172.17.0.3 port 80 (#0)
*   Trying 172.17.0.3...
* Connected to 172.17.0.3 (172.17.0.3) port 80 (#0)
> GET /apache/index.html HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 172.17.0.3
> Accept: */*
>
< HTTP/1.1 200 OK
< Server: nginx/1.17.8
< Date: Tue, 18 Feb 2020 04:15:07 GMT
< Content-Type: text/html
< Content-Length: 139
< Connection: keep-alive
< Last-Modified: Tue, 18 Feb 2020 03:00:51 GMT
< ETag: "8b-59ed0e1267770"
< Accept-Ranges: bytes
< X-debug-message: this is debug message for /apache/ "8b-59ed0e1267770"
<
<!DOCTYPE html>
<html>
<head>
  <title>test title</title>
</head>
<body>
  <h1>Header</h1>
  <p>This is test message.</p>
</body>
</html>

* Connection #0 to host 172.17.0.3 left intact
[root@vm01 work_n]#

try to install kubernetes 1.15 using kubeadm

ref

Installing kubeadm https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/

Kubernetes on CentOS7 (updated 2019-06-04) https://qiita.com/nagase/items/15726e37057e7cc3b8cd

VM

node     ip              hostname  role
host01   172.16.10.121   host01    master
host02   172.16.10.122   host02    worker
host03   172.16.10.123   host03    worker
host04   172.16.10.124   host04    router

setting

  • yum update
  • SELinux disable
  • chronyd enable
  • firewalld disable

Before you begin

One or more machines running one of:

CentOS 7

2 GB or more of RAM per machine (any less will leave little room for your apps)

4 GB (VM)

2 CPUs or more

2 vCPU (VM)

Full network connectivity between all machines in the cluster (public or private network is fine)

OK.

Unique hostname, MAC address, and product_uuid for every node. See here for more details.

I check the MAC address with "ip a s enp0s3" and the product_uuid with "cat /sys/class/dmi/id/product_uuid".

Certain ports are open on your machines. See here for more details.

For simplicity, I disable firewalld.

Swap disabled. You MUST disable swap in order for the kubelet to work properly

I edit "/etc/fstab" file.
A line for swap in fstab file is commented out.
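The same can be done from the command line, and swapoff turns swap off immediately without a reboot; a minimal sketch (the sed assumes the default CentOS swap entry in fstab):

swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab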

[root@host01 ~]# cat /etc/fstab

/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=50db31a1-3ec7-4b45-aa80-c4b207b16396 /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
[root@host01 ~]#
[root@host01 ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:           3.7G         83M        3.5G        8.5M        161M        3.4G
Swap:            0B          0B          0B
[root@host01 ~]#
[root@host01 ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:           3.7G        161M        3.2G        8.5M        356M        3.3G
Swap:            0B          0B          0B
[root@host01 ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@host01 ~]# systemctl stop firewalld
[root@host01 ~]#

Installing runtime

https://docs.docker.com/install/linux/docker-ce/centos/ https://kubernetes.io/docs/setup/production-environment/container-runtimes/

On each of your machines, install Docker. Version 18.06.2 is recommended, but 1.11, 1.12, 1.13, 17.03 and 18.09 are known to work as well. Keep track of the latest verified Docker version in the Kubernetes release notes.

I install version 18.09.8.

# yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2
# yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
[root@host04 ~]# yum list docker-ce --showduplicates | sort -r
 * updates: ftp.riken.jp
Loading mirror speeds from cached hostfile
Loaded plugins: fastestmirror
 * extras: ftp.riken.jp
docker-ce.x86_64            3:19.03.1-3.el7                     docker-ce-stable
docker-ce.x86_64            3:19.03.0-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.8-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.7-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.6-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.5-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.4-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.3-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.2-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.1-3.el7                     docker-ce-stable
docker-ce.x86_64            3:18.09.0-3.el7                     docker-ce-stable
docker-ce.x86_64            18.06.3.ce-3.el7                    docker-ce-stable
docker-ce.x86_64            18.06.2.ce-3.el7                    docker-ce-stable
docker-ce.x86_64            18.06.1.ce-3.el7                    docker-ce-stable
docker-ce.x86_64            18.06.0.ce-3.el7                    docker-ce-stable
docker-ce.x86_64            18.03.1.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            18.03.0.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.12.1.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.12.0.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.09.1.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.09.0.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.06.2.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.06.1.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.06.0.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.03.3.ce-1.el7                    docker-ce-stable
docker-ce.x86_64            17.03.2.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.03.1.ce-1.el7.centos             docker-ce-stable
docker-ce.x86_64            17.03.0.ce-1.el7.centos             docker-ce-stable
 * base: ftp.riken.jp
Available Packages
[root@host04 ~]#
## Install Docker CE.
yum install docker-ce-18.06.3.ce

## Create /etc/docker directory.
mkdir /etc/docker

# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
systemctl daemon-reload
systemctl start docker
systemctl enable docker
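To confirm that the cgroup driver from daemon.json took effect, docker info can be checked; a quick sanity check:

docker info | grep -i cgroup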

Routing settings (kernel parameters)

[root@host01 ~]# sysctl -a | grep net.bridge
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-filter-pppoe-tagged = 0
net.bridge.bridge-nf-filter-vlan-tagged = 0
net.bridge.bridge-nf-pass-vlan-input-dev = 0
sysctl: reading key "net.ipv6.conf.all.stable_secret"
sysctl: reading key "net.ipv6.conf.default.stable_secret"
sysctl: reading key "net.ipv6.conf.docker0.stable_secret"
sysctl: reading key "net.ipv6.conf.enp0s3.stable_secret"
sysctl: reading key "net.ipv6.conf.enp0s8.stable_secret"
sysctl: reading key "net.ipv6.conf.lo.stable_secret"
[root@host01 ~]#

Installing kubeadm, kubelet and kubectl

# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

I have already taken care of SELinux (it was disabled as part of the initial settings above).

Set SELinux in permissive mode (effectively disabling it)

# setenforce 0
# sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
# cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system
# systemctl enable --now kubelet

Using kubeadm to Create a Cluster

ref) Creating a single control-plane cluster with kubeadm
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#check-required-ports

# kubeadm init --apiserver-advertise-address 172.16.10.121 \
  --pod-network-cidr 10.244.0.0/16

from kubeadm help,

Flags:
      --apiserver-advertise-address string   The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.
      --pod-network-cidr string              Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.
      --service-cidr string                  Use alternative range of IP address for service VIPs. (default "10.96.0.0/12")

and if I use flannel for pod network,

For flannel to work correctly, you must pass --pod-network-cidr=10.244.0.0/16 to kubeadm init.

In the case of using the latest Docker version (19.03), I faced the warnings below. Also, because I had not set up the hosts file, the nodes could not be reached by hostname.
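One way to fix the hostname lookups is to add the nodes to /etc/hosts on every machine; a minimal sketch based on the VM table above:

cat <<'EOF' >> /etc/hosts
172.16.10.121 host01
172.16.10.122 host02
172.16.10.123 host03
172.16.10.124 host04
EOF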

[root@host01 ~]# kubeadm init --apiserver-advertise-address 172.16.10.121 \
>   --pod-network-cidr 10.244.0.0/16
[init] Using Kubernetes version: v1.15.2
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.1. Latest validated version: 18.09
        [WARNING Hostname]: hostname "host01" could not be reached
        [WARNING Hostname]: hostname "host01": lookup host01 on 172.16.10.1:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [host01 localhost] and IPs [172.16.10.121 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [host01 localhost] and IPs [172.16.10.121 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [host01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.10.121]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 45.503048 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node host01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node host01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: w093bz.o6le27ga24gm6dtc
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.10.121:6443 --token w093bz.o6le27ga24gm6dtc \
    --discovery-token-ca-cert-hash sha256:03ad46916fa37e5a4991d7eed0d904aaeeab2d180597b0703689862ce9d112ca
[root@host01 ~]#

In the error case, firewalld was running (without opening the required ports) and swap was still on; the error below occurred:

[root@host01 ~]# kubeadm init --apiserver-advertise-address 172.16.10.121 \
>   --pod-network-cidr 10.244.0.0/16
[init] Using Kubernetes version: v1.15.2
[preflight] Running pre-flight checks
        [WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
[root@host01 ~]#

In the successful case:

[root@host01 ~]# kubeadm init --apiserver-advertise-address 172.16.10.121 \
>   --pod-network-cidr 10.244.0.0/16
[init] Using Kubernetes version: v1.15.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [host01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.10.121]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [host01 localhost] and IPs [172.16.10.121 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [host01 localhost] and IPs [172.16.10.121 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 45.010683 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node host01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node host01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: <<<toke value>>>
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.10.121:6443 --token <<<toke value>>> \
    --discovery-token-ca-cert-hash sha256:<<<sha256 value>>>
[root@host01 ~]#

prepare user to operate kubectl command

opeuser

[root@host01 ~]# useradd opeuser
[root@host01 ~]# passwd opeuser
Changing password for user opeuser.
New password:
BAD PASSWORD: The password contains the user name in some form
Retype new password:
passwd: all authentication tokens updated successfully.
[root@host01 ~]# su - opeuser
[opeuser@host01 ~]$ pwd
/home/opeuser
[opeuser@host01 ~]$ exit
logout
[root@host01 ~]# usermod -aG wheel opeuser
[root@host01 ~]# id opeuser
uid=1000(opeuser) gid=1000(opeuser) groups=1000(opeuser),10(wheel)
[root@host01 ~]#
[root@host01 ~]# visudo

(change the following part to NOPASSWD)
## Allows people in group wheel to run all commands
# %wheel        ALL=(ALL)       ALL

## Same thing without a password
%wheel  ALL=(ALL)       NOPASSWD: ALL
[opeuser@host01 ~]$ mkdir -p $HOME/.kube
[opeuser@host01 ~]$ ls -la
total 16
drwx------  3 opeuser opeuser  96 Aug 12 15:51 .
drwxr-xr-x. 3 root    root     21 Aug 12 15:33 ..
-rw-------  1 opeuser opeuser   9 Aug 12 15:34 .bash_history
-rw-r--r--  1 opeuser opeuser  18 Oct 31  2018 .bash_logout
-rw-r--r--  1 opeuser opeuser 193 Oct 31  2018 .bash_profile
-rw-r--r--  1 opeuser opeuser 231 Oct 31  2018 .bashrc
drwxrwxr-x  2 opeuser opeuser   6 Aug 12 15:51 .kube
[opeuser@host01 ~]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[opeuser@host01 ~]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
[opeuser@host01 ~]$ ls -l .kube/config
-rw------- 1 opeuser opeuser 5453 Aug 12 15:51 .kube/config
[opeuser@host01 ~]$

Installing a pod network add-on

# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/62e44c867a2846fefb68bd5f178daf4da3095ccb/Documentation/kube-flannel.yml
[opeuser@host01 ~]$ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/62e44c867a2846fefb68bd5f178daf4da3095ccb/Documentation/kube-flannel.yml
podsecuritypolicy.extensions/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created
[opeuser@host01 ~]$

Joining your nodes

[root@host02 ~]# kubeadm join 172.16.10.121:6443 --token <<<toke value>>> \
>     --discovery-token-ca-cert-hash sha256:<<<sha256 value>>>
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@host02 ~]#

After installation

[opeuser@host01 ~]$ kubectl get node
NAME     STATUS     ROLES    AGE     VERSION
host01   Ready      master   40m     v1.15.2
host02   Ready      <none>   4m12s   v1.15.2
host03   NotReady   <none>   8s      v1.15.2
host04   NotReady   <none>   4s      v1.15.2
[opeuser@host01 ~]$
[opeuser@host01 ~]$ kubectl get pod --all-namespaces -o wide
NAMESPACE     NAME                             READY   STATUS              RESTARTS   AGE     IP              NODE     NOMINATED NODE   READINESS GATES
kube-system   coredns-5c98db65d4-45snj         1/1     Running             0          39m     10.244.0.2      host01   <none>           <none>
kube-system   coredns-5c98db65d4-f7cpb         1/1     Running             0          39m     10.244.0.3      host01   <none>           <none>
kube-system   etcd-host01                      1/1     Running             0          39m     172.16.10.121   host01   <none>           <none>
kube-system   kube-apiserver-host01            1/1     Running             0          39m     172.16.10.121   host01   <none>           <none>
kube-system   kube-controller-manager-host01   1/1     Running             0          38m     172.16.10.121   host01   <none>           <none>
kube-system   kube-flannel-ds-amd64-2mh69      0/1     Init:0/1            0          12s     172.16.10.123   host03   <none>           <none>
kube-system   kube-flannel-ds-amd64-9k8tg      1/1     Running             0          12m     172.16.10.121   host01   <none>           <none>
kube-system   kube-flannel-ds-amd64-9qxm2      1/1     Running             0          4m16s   172.16.10.122   host02   <none>           <none>
kube-system   kube-flannel-ds-amd64-gl5dr      0/1     Init:0/1            0          8s      172.16.10.124   host04   <none>           <none>
kube-system   kube-proxy-6mw2k                 1/1     Running             0          39m     172.16.10.121   host01   <none>           <none>
kube-system   kube-proxy-76xvm                 0/1     ContainerCreating   0          12s     172.16.10.123   host03   <none>           <none>
kube-system   kube-proxy-fc8gk                 0/1     ContainerCreating   0          8s      172.16.10.124   host04   <none>           <none>
kube-system   kube-proxy-lph6t                 1/1     Running             0          4m16s   172.16.10.122   host02   <none>           <none>
kube-system   kube-scheduler-host01            1/1     Running             0          38m     172.16.10.121   host01   <none>           <none>
[opeuser@host01 ~]$

set Role to Node

[opeuser@host01 ~]$ kubectl label node host02 node-role.kubernetes.io/worker=
[opeuser@host01 ~]$ kubectl label node host03 node-role.kubernetes.io/worker=
[opeuser@host01 ~]$ kubectl label node host04 node-role.kubernetes.io/router=
[opeuser@host01 ~]$ kubectl get node
NAME     STATUS   ROLES    AGE   VERSION
host01   Ready    master   21h   v1.15.2
host02   Ready    worker   20h   v1.15.2
host03   Ready    worker   20h   v1.15.2
host04   Ready    router   20h   v1.15.2
[opeuser@host01 ~]$
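The ROLES column in kubectl get node comes from the node-role.kubernetes.io/<role> labels set above; they can be inspected with, for example:

kubectl get nodes --show-labels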

metrics

https://github.com/kubernetes-incubator/metrics-server https://qiita.com/chataro0/items/28f8744e2781f730a0e6 https://kubernetes.io/docs/tasks/debug-application-cluster/debug-application-introspection/ https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/metrics-server.md

I cannot get metrics with the kubectl top node command:

[opeuser@host01 ~]$ kubectl top node
Error from server (NotFound): the server could not find the requested resource (get services http:heapster:)
[opeuser@host01 ~]$

Let's try to set up metrics-server.

[opeuser@host01 ~]$ git clone https://github.com/kubernetes-incubator/metrics-server
Cloning into 'metrics-server'...
remote: Enumerating objects: 7, done.
remote: Counting objects: 100% (7/7), done.
remote: Compressing objects: 100% (7/7), done.
remote: Total 11192 (delta 0), reused 4 (delta 0), pack-reused 11185
Receiving objects: 100% (11192/11192), 12.11 MiB | 4.69 MiB/s, done.
Resolving deltas: 100% (5834/5834), done.
[opeuser@host01 ~]$
[opeuser@host01 ~]$ cd metrics-server/deploy/1.8+/
[opeuser@host01 1.8+]$ ls -l
total 28
-rw-rw-r-- 1 opeuser opeuser 384 Aug 13 15:14 aggregated-metrics-reader.yaml
-rw-rw-r-- 1 opeuser opeuser 308 Aug 13 15:14 auth-delegator.yaml
-rw-rw-r-- 1 opeuser opeuser 329 Aug 13 15:14 auth-reader.yaml
-rw-rw-r-- 1 opeuser opeuser 298 Aug 13 15:14 metrics-apiservice.yaml
-rw-rw-r-- 1 opeuser opeuser 815 Aug 13 15:14 metrics-server-deployment.yaml
-rw-rw-r-- 1 opeuser opeuser 291 Aug 13 15:14 metrics-server-service.yaml
-rw-rw-r-- 1 opeuser opeuser 502 Aug 13 15:14 resource-reader.yaml
[opeuser@host01 1.8+]$

[opeuser@host01 metrics-server]$ cd
[opeuser@host01 ~]$ cd metrics-server/
[opeuser@host01 metrics-server]$ kubectl create -f deploy/1.8+/
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
deployment.extensions/metrics-server created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
[opeuser@host01 metrics-server]$

We can see the metrics-server pod, and it is Running. However, kubectl top node still reports that metrics are not available:

[opeuser@host01 ~]$ kubectl get pod --all-namespaces -o wide
NAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE   IP              NODE     NOMINATED NODE   READINESS GATES
kube-system   coredns-5c98db65d4-45snj          1/1     Running   1          24h   10.244.0.5      host01   <none>           <none>
kube-system   coredns-5c98db65d4-f7cpb          1/1     Running   1          24h   10.244.0.4      host01   <none>           <none>
kube-system   etcd-host01                       1/1     Running   1          24h   172.16.10.121   host01   <none>           <none>
kube-system   kube-apiserver-host01             1/1     Running   1          24h   172.16.10.121   host01   <none>           <none>
kube-system   kube-controller-manager-host01    1/1     Running   1          24h   172.16.10.121   host01   <none>           <none>
kube-system   kube-flannel-ds-amd64-2mh69       1/1     Running   3          23h   172.16.10.123   host03   <none>           <none>
kube-system   kube-flannel-ds-amd64-9k8tg       1/1     Running   1          23h   172.16.10.121   host01   <none>           <none>
kube-system   kube-flannel-ds-amd64-9qxm2       1/1     Running   1          23h   172.16.10.122   host02   <none>           <none>
kube-system   kube-flannel-ds-amd64-gl5dr       1/1     Running   2          23h   172.16.10.124   host04   <none>           <none>
kube-system   kube-proxy-6mw2k                  1/1     Running   1          24h   172.16.10.121   host01   <none>           <none>
kube-system   kube-proxy-76xvm                  1/1     Running   1          23h   172.16.10.123   host03   <none>           <none>
kube-system   kube-proxy-fc8gk                  1/1     Running   1          23h   172.16.10.124   host04   <none>           <none>
kube-system   kube-proxy-lph6t                  1/1     Running   1          23h   172.16.10.122   host02   <none>           <none>
kube-system   kube-scheduler-host01             1/1     Running   1          24h   172.16.10.121   host01   <none>           <none>
kube-system   metrics-server-6d7c9596cb-n5nl4   1/1     Running   0          11m   10.244.1.2      host02   <none>           <none>
[opeuser@host01 ~]$ kubectl top node
error: metrics not available yet
[opeuser@host01 ~]$

edit the metrics-server deployment to add flags

[opeuser@host01 ~]$ kubectl edit deploy -n kube-system metrics-server

The flags to add under the metrics-server container args:

        args:
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname

I got top node result.

[opeuser@host01 ~]$ kubectl top node
NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
host01   394m         19%    856Mi           23%
host02   109m         5%     294Mi           7%
host03   106m         5%     292Mi           7%
host04   92m          4%     220Mi           5%
[opeuser@host01 ~]$

Note:
When I did not set the "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname" flag,
I saw the error message below in the metrics-server pod log.

E0813 06:58:40.032392       1 manager.go:111] unable to fully collect metrics: [unable to fully scrape metrics from source kubelet_summary:host02: unable to fetch metrics from Kubelet host02 (host02): Get https://host02:10250/stats/summary/: dial tcp: lookup host02 on 10.96.0.10:53: no such host, unable to fully scrape metrics from source kubelet_summary:host03: unable to fetch metrics from Kubelet host03 (host03): Get https://host03:10250/stats/summary/: dial tcp: lookup host03 on 10.96.0.10:53: no such host, unable to fully scrape metrics from source kubelet_summary:host04: unable to fetch metrics from Kubelet host04 (host04): Get https://host04:10250/stats/summary/: dial tcp: lookup host04 on 10.96.0.10:53: no such host, unable to fully scrape metrics from source kubelet_summary:host01: unable to fetch metrics from Kubelet host01 (host01): Get https://host01:10250/stats/summary/: dial tcp: lookup host01 on 10.96.0.10:53: no such host]
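For reference, the metrics-server log can be viewed with a command along these lines:

kubectl -n kube-system logs deploy/metrics-server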

kubectl bash completion

ref) https://kubernetes.io/docs/tasks/tools/install-kubectl/

[opeuser@host01 ~]$ kubectl completion bash > kubectl.txt
[opeuser@host01 ~]$ ls -l kubectl.txt
-rw-rw-r-- 1 opeuser opeuser 273831 Aug 13 15:46 kubectl.txt
[opeuser@host01 ~]$

[opeuser@host01 ~]$ sudo cp -p kubectl.txt /etc/bash_completion.d/kubectl
[opeuser@host01 ~]$ ls -l /etc/bash_completion.d/
total 344
-rw-r--r-- 1 root    root     56178 Nov 20  2018 git
-rw-r--r-- 1 root    root       829 Oct 31  2018 iprutils
-rw-rw-r-- 1 opeuser opeuser 273831 Aug 13 15:46 kubectl
-rw-r--r-- 1 root    root      1458 Nov 26  2013 redefine_filedir
-rw-r--r-- 1 root    root     11272 Oct 31  2018 yum-utils.bash
[opeuser@host01 ~]$

Load Balancer (MetalLB)

https://metallb.universe.tf/

Check the Taints on host01 (the master).

[opeuser@host01 ~]$ kubectl describe node host01 | grep Taints
Taints:             node-role.kubernetes.io/master:NoSchedule
[opeuser@host01 ~]$

Set a taint on host04:
$ kubectl taint nodes host04 node-role.kubernetes.io/router=:NoSchedule

[opeuser@host01 ~]$ kubectl taint nodes host04 node-role.kubernetes.io/router=:NoSchedule
node/host04 tainted
[opeuser@host01 ~]$ kubectl describe node host04 | grep Taints
Taints:             node-role.kubernetes.io/router:NoSchedule
[opeuser@host01 ~]$

get the manifest for metallb.

[opeuser@host01 ~]$ wget https://raw.githubusercontent.com/google/metallb/v0.8.1/manifests/metallb.yaml
--2019-08-14 12:46:01--  https://raw.githubusercontent.com/google/metallb/v0.8.1/manifests/metallb.yaml
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.108.133
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 5462 (5.3K) [text/plain]
Saving to: ‘metallb.yaml’

100%[=====================================================================================================================================================================>] 5,462       --.-K/s   in 0s

2019-08-14 12:46:01 (80.8 MB/s) - ‘metallb.yaml’ saved [5462/5462]

[opeuser@host01 ~]$ ls -l
total 280
-rw-rw-r-- 1 opeuser opeuser 273831 Aug 13 15:46 kubectl.txt
-rw-rw-r-- 1 opeuser opeuser   5462 Aug 14 12:46 metallb.yaml
drwxrwxr-x 9 opeuser opeuser   4096 Aug 13 15:14 metrics-server
drwxrwxr-x 4 opeuser opeuser     33 Aug 14 12:45 wk_metallb
[opeuser@host01 ~]$

Then I edit the metallb.yaml file so that the MetalLB pods can also be scheduled on the tainted router node (host04).

[opeuser@host01 ~]$ cp -p metallb.yaml metallb.yaml.org
[opeuser@host01 ~]$ vi metallb.yaml
[opeuser@host01 ~]$ diff metallb.yaml metallb.yaml.org
243,244d242
<       - effect: NoSchedule
<         key: node-role.kubernetes.io/router
296,298d293
<       tolerations:
<       - effect: NoSchedule
<         key: node-role.kubernetes.io/router
[opeuser@host01 ~]$
[opeuser@host01 ~]$ kubectl apply -f metallb.yaml
namespace/metallb-system created
podsecuritypolicy.policy/speaker created
serviceaccount/controller created
serviceaccount/speaker created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
role.rbac.authorization.k8s.io/config-watcher created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/config-watcher created
daemonset.apps/speaker created
deployment.apps/controller created
[opeuser@host01 ~]$
[opeuser@host01 ~]$ kubectl get ns
NAME              STATUS   AGE
default           Active   45h
kube-node-lease   Active   45h
kube-public       Active   45h
kube-system       Active   45h
metallb-system    Active   84s
[opeuser@host01 ~]$

[opeuser@host01 ~]$ kubectl get all -n metallb-system -o wide
NAME                              READY   STATUS    RESTARTS   AGE     IP              NODE     NOMINATED NODE   READINESS GATES
pod/controller-7dd978fdd5-rjcjl   1/1     Running   0          5m52s   10.244.3.2      host04   <none>           <none>
pod/speaker-92tmv                 1/1     Running   0          5m52s   172.16.10.124   host04   <none>           <none>
pod/speaker-dvxhg                 1/1     Running   0          5m52s   172.16.10.122   host02   <none>           <none>
pod/speaker-v777h                 1/1     Running   0          5m52s   172.16.10.123   host03   <none>           <none>
pod/speaker-wzfxc                 1/1     Running   0          5m52s   172.16.10.121   host01   <none>           <none>



NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE     CONTAINERS   IMAGES                   SELECTOR
daemonset.apps/speaker   4         4         4       4            4           beta.kubernetes.io/os=linux   5m52s   speaker      metallb/speaker:v0.8.1   app=metallb,component=speaker

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS   IMAGES                      SELECTOR
deployment.apps/controller   1/1     1            1           5m52s   controller   metallb/controller:v0.8.1   app=metallb,component=controller

NAME                                    DESIRED   CURRENT   READY   AGE     CONTAINERS   IMAGES                      SELECTOR
replicaset.apps/controller-7dd978fdd5   1         1         1       5m52s   controller   metallb/controller:v0.8.1   app=metallb,component=controller,pod-template-hash=7dd978fdd5




[opeuser@host01 ~]$

ref) https://metallb.universe.tf/configuration/

[opeuser@host01 ~]$ vi metallb-l2.yaml
[opeuser@host01 ~]$ cat metallb-l2.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 172.16.10.201-172.16.10.220
[opeuser@host01 ~]$
[opeuser@host01 ~]$ kubectl apply -f metallb-l2.yaml
configmap/config created
[opeuser@host01 ~]$

deploy Nginx

ref) https://kubernetes.io/docs/tasks/run-application/run-stateless-application-deployment/
application/deployment.yaml

[opeuser@host01 ~]$ vi deploy-nginx.yaml
[opeuser@host01 ~]$ cat deploy-nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1
        ports:
        - name: http
          containerPort: 80
[opeuser@host01 ~]$
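The manifest has to be applied before the Deployment appears; presumably it was applied with something like the following (the output was not captured here):

kubectl apply -f deploy-nginx.yaml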
[opeuser@host01 ~]$ kubectl get deploy
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   1/1     1            1           23s
[opeuser@host01 ~]$ kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
nginx-6d4fbdf4df-mn2sz   1/1     Running   0          75s   10.244.2.3   host03   <none>           <none>
[opeuser@host01 ~]$

create service

ref) https://metallb.universe.tf/usage/ — the usage page shows a basic LoadBalancer Service and a variant that requests a specific address pool via the metallb.universe.tf/address-pool annotation; both examples are reproduced below.

apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    metallb.universe.tf/address-pool: production-public-ips
spec:
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer
[opeuser@host01 ~]$ vi service-nginx.yaml
[opeuser@host01 ~]$ cat service-nginx.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer
[opeuser@host01 ~]$ kubectl apply -f service-nginx.yaml
service/nginx created
[opeuser@host01 ~]$
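To see which address MetalLB assigned from the 172.16.10.201-172.16.10.220 pool, the Service can be checked and tested; a quick sketch (the EXTERNAL-IP is whatever MetalLB picks from the pool):

kubectl get service nginx
curl http://<EXTERNAL-IP>/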

add node to kubernetes cluster

kubeadm join 172.16.10.121:6443 --token <<<toke value>>> \
    --discovery-token-ca-cert-hash sha256:<<<sha256 value>>>
[opeuser@host01 ~]$ kubeadm token list
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
<<<toke value>>>   <invalid>   2019-08-13T15:26:56+09:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
[opeuser@host01 ~]$
[opeuser@host01 ~]$ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
>    openssl dgst -sha256 -hex | sed 's/^.* //'
<<<sha256 value>>>
[opeuser@host01 ~]$

Back up and delete the files in /etc/kubernetes on the new node (host05), then reboot.

[root@host05 ~]# cp -rp /etc/kubernetes /tmp/kube_bak
[root@host05 ~]# cd /etc/kubernetes/
[root@host05 kubernetes]# rm -rf *
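For reference, kubeadm also has a reset subcommand that cleans up a node before it rejoins a cluster; it was not used here, but it is the usual route:

kubeadm reset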

I try to join the cluster, but get an error.

[root@host05 ~]# kubeadm join 172.16.10.121:6443 --token <<<toke value>>> \
>     --discovery-token-ca-cert-hash sha256:<<<sha256 value>>>
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
error execution phase preflight: unable to fetch the kubeadm-config ConfigMap: failed to get config map: Unauthorized
[root@host05 ~]#

The original bootstrap token has already expired (its TTL shows <invalid> in the token list), which is most likely why the join was Unauthorized, so I create a new token.

[opeuser@host01 ~]$ kubeadm token create
osqrxm.ixs7hqnxwwjo8jjj
[opeuser@host01 ~]$ kubeadm token list
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
<<<toke value>>>   <invalid>   2019-08-13T15:26:56+09:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
osqrxm.ixs7hqnxwwjo8jjj   23h         2019-08-15T19:02:40+09:00   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[opeuser@host01 ~]$
[root@host05 kubernetes]# kubeadm join 172.16.10.121:6443 --token osqrxm.ixs7hqnxwwjo8jjj --discovery-token-ca-cert-hash sha256:<<<sha256 value>>>
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[kubelet-check] Initial timeout of 40s passed.
error execution phase kubelet-start: error uploading crisocket: timed out waiting for the condition
[root@host05 kubernetes]#

I check the node afterwards and set its role label:

kubectl label node host05 node-role.kubernetes.io/worker=

try to use zabbix api on bash

I created this script to add a host and a host group via the Zabbix API.

#!/bin/bash
set -x

### config
user_name=user01
user_pass=pass_string
zabbix_url=http://localhost/zabbix/api_jsonrpc.php

header="Content-Type: application/json-rpc"


# ------------------------------------------
# set token
# ------------------------------------------
function set_token() {

local json_data=$(cat << EOS
{
    "jsonrpc": "2.0",
    "method": "user.login",
    "params": {
        "user": "${user_name}",
        "password": "${user_pass}"
    },
    "id": 1,
    "auth": null
}
EOS
)

token=`curl -s -H "${header}" ${zabbix_url} -d "${json_data}" | jq -r .result`

}


# ------------------------------------------
# get host
# ------------------------------------------
function get_host() {

local json_data=$(cat << EOS
{
    "jsonrpc": "2.0",
    "method": "host.get",
    "params": {
      "output": [
        "hostid",
        "host"
      ],
      "selectInterfaces": [
        "interfaceid",
        "ip"
      ]
    },
    "id": 2,
    "auth": "${token}"
}
EOS
)

result=`curl -s -d "${json_data}" -H "${header}" ${zabbix_url} | jq -r .result`

}


# ------------------------------------------
# add host group
#   $1 - host group name
# ------------------------------------------
function add_host_group() {

local json_data=$(cat << EOS
{
    "jsonrpc": "2.0",
    "method": "hostgroup.create",
    "params": {
        "name": "$1"
    },
    "auth": "${token}",
    "id": 3
}
EOS
)

# echo "${json_data}"
ret_value=`curl -s -H "${header}" ${zabbix_url} -d "${json_data}" | jq -r .result.groupids[0]`

}


# ------------------------------------------
# get host group
#   $1 - host group name
# ------------------------------------------
function get_host_group() {

local json_data=$(cat << EOS
{
    "jsonrpc": "2.0",
    "method": "hostgroup.get",
    "params": {
        "output": "extend",
        "filter": {
            "name": [
                "$1"
            ]
        }
    },
    "auth": "${token}",
    "id": 4
}
EOS
)

# echo "${json_data}"
ret_value=`curl -s -H "${header}" ${zabbix_url} -d "${json_data}" | jq -r .result[0].groupid`
echo $ret_value

}


# ------------------------------------------
# add host
#   $1 - host name
#   $2 - host group id
#   $3 - ip
# ------------------------------------------
function add_host() {

local json_data=$(cat << EOS
{
    "jsonrpc": "2.0",
    "method": "host.create",
    "params": {
        "host": "$1",
        "interfaces": [
            {
                "type": 1,
                "main": 1,
                "useip": 1,
                "ip": "$3",
                "dns": "",
                "port": "10050"
            }
        ],
        "groups": [
            {
                "groupid": "$2"
            }
        ],
        "templates": [
            {
                "templateid": "10001"
            }
        ]
    },
    "auth": "${token}",
    "id": 5
}
EOS
)

# echo "${json_data}"
ret_value=`curl -s -H "${header}" ${zabbix_url} -d "${json_data}" | jq .result`
echo $ret_value

}


# ------------------------------------------
# MAIN
# ------------------------------------------

set_token
echo $token

# add group
group_name="test group33"
get_host_group "${group_name}"

if [ "null" = "${ret_value}" ]; then
  # add host group
  add_host_group "${group_name}"
  group_id=${ret_value}

else
  # already added
  group_id=${ret_value}

fi

echo ${group_id}

# add host
host_name="test_host1"
ip_address="192.168.100.10"
add_host "${host_name}" "${group_id}" "${ip_address}"

openstack bash complete

bash complete

https://docs.openstack.org/python-openstackclient/pike/cli/command-objects/complete.html

[root@localhost ~(keystone_admin)]# openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
[root@localhost ~(keystone_admin)]# ls -l /etc/bash_completion.d/
total 196
-rw-r--r-- 1 root root    955 Aug  2  2018 cinder.bash_completion
-rw-r--r-- 1 root root    882 Dec 12 12:53 glance
-rwxr--r-- 1 root root  11345 Oct 13  2017 gluster
-rw-r--r-- 1 root root    829 Oct 31 04:20 iprutils
-rw-r--r-- 1 root root    885 Jul 27  2018 nova
-rw-r--r-- 1 root root 100219 Apr 30 16:09 osc.bash_completion
-rwxr-xr-x 1 root root  17838 Feb 14 18:58 ovs-appctl-bashcomp.bash
-rwxr-xr-x 1 root root  28733 Feb 14 18:58 ovs-vsctl-bashcomp.bash
-rw-r--r-- 1 root root   1458 Nov 26  2013 redefine_filedir
-rw-r--r-- 1 root root  11272 Oct 31 07:58 yum-utils.bash
[root@localhost ~(keystone_admin)]#
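The completion takes effect in new login shells; to use it in the current shell, the file can also be sourced directly:

source /etc/bash_completion.d/osc.bash_completion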

network setting for openstack

After the installation of OpenStack (see the earlier post at akai-tsuki.hatenablog.com), I set up the network.

Network setting

I use network 1 to access the VMs in OpenStack, and I connect to the server itself via network 2 by ssh.

(figure: all_in_one_network)

[root@localhost ~]# cd /etc/sysconfig/network-scripts/
[root@localhost network-scripts]# ls -ltr ifcfg-*
-rw-r--r-- 1 root root 254 Aug 24  2018 ifcfg-lo
-rw-r--r-- 1 root root 329 Apr 30 13:41 ifcfg-enp0s8
-rw-r--r-- 1 root root 424 Apr 30 14:42 ifcfg-enp0s3
-rw-r--r-- 1 root root 192 Apr 30 14:47 ifcfg-br-ex
[root@localhost network-scripts]#
[root@localhost network-scripts]# cat ifcfg-br-ex
DEVICE=br-ex
DEVICETYPE=ovs
TYPE=OVSBridge
BOOTPROTO=static
NM_CONTROLLED=no
ONBOOT=yes
HOTPLUG=no
IPADDR=172.16.10.152
PREFIX=24
GATEWAY=172.16.10.1
DNS1=172.16.10.1
DEFROUTE=yes
IPV6INIT=no
[root@localhost network-scripts]#
[root@localhost network-scripts]# cat ifcfg-enp0s3 | grep -v "^#"
NM_CONTROLLED=no
DEVICETYPE=ovs
TYPE=OVSPort
OVS_BRIDGE=br-ex
DEVICE=enp0s3
NAME=enp0s3
BOOTPROTO=none
UUID=3a0633b6-815b-4196-9756-6824a53b07ff
ONBOOT=yes
[root@localhost network-scripts]#
[root@localhost network-scripts]# cat ifcfg-enp0s8 | grep -v "^#"
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
IPADDR=192.168.20.10
PREFIX=24
DEFROUTE=no
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=enp0s8
UUID=054f6482-6dec-4d83-bf71-0c7b050beb79
DEVICE=enp0s8
ONBOOT=yes
[root@localhost network-scripts]#
[root@localhost network-scripts]# ip a s br-ex
7: br-ex: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
    link/ether 08:00:27:95:13:e8 brd ff:ff:ff:ff:ff:ff
    inet 172.16.10.152/24 brd 172.16.10.255 scope global br-ex
       valid_lft forever preferred_lft forever
    inet6 fe80::bc97:d4ff:fedf:9f46/64 scope link
       valid_lft forever preferred_lft forever
[root@localhost network-scripts]#

[root@localhost network-scripts]# ip a s enp0s3
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master ovs-system state UP group default qlen 1000
    link/ether 08:00:27:95:13:e8 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::a00:27ff:fe95:13e8/64 scope link
       valid_lft forever preferred_lft forever
[root@localhost network-scripts]#

[root@localhost network-scripts]# ip a s enp0s8
3: enp0s8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 08:00:27:ba:eb:1f brd ff:ff:ff:ff:ff:ff
    inet 192.168.20.10/24 brd 192.168.20.255 scope global enp0s8
       valid_lft forever preferred_lft forever
    inet6 fe80::a00:27ff:feba:eb1f/64 scope link
       valid_lft forever preferred_lft forever
[root@localhost network-scripts]#
[root@localhost network-scripts]# ovs-vsctl list-br
br-ex
br-int
br-tun
[root@localhost network-scripts]#

[root@localhost network-scripts]# ovs-vsctl list-ports br-ex
enp0s3
phy-br-ex
[root@localhost network-scripts]#