
Install OpenShift

ref. Try to prepare host for installation of OpenShift. - AKAI TSUKI

hosts file

on all nodes:

# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.10.111 opshift01 opshift01.example.com
172.16.10.112 opshift02 opshift02.example.com
172.16.10.113 opshift03 opshift03.example.com
#
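
To make sure name resolution actually works, the entries can be checked on each node (a quick sketch; every line should print the IP from /etc/hosts):

# for h in opshift01 opshift02 opshift03; do getent hosts $h; done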

inventory file

on the master node:

# cat inventory_file/hosts
[OSEv3:children]
masters
nodes

[OSEv3:vars]
ansible_ssh_user=root
openshift_deployment_type=origin

openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]

openshift_disable_check=memory_availability

openshift_master_default_subdomain=apps.example.com

[masters]
opshift01.example.com

[nodes]
opshift01.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}" openshift_schedulable=true
opshift02.example.com openshift_node_labels="{'region': 'primary', 'zone': 'west'}"
opshift03.example.com openshift_node_labels="{'region': 'primary', 'zone': 'east'}"

#
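
Before running the playbook, the inventory and SSH connectivity can be verified with an ad-hoc ping (a sketch; every host should answer with "pong"):

# ansible -i inventory_file/hosts all -m ping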

run ansible-playbook

# ansible-playbook -i inventory_file/hosts \
> ~/openshift-ansible/playbooks/byo/config.yml

*snip*

PLAY RECAP *****************************************************************************************
localhost                  : ok=9    changed=0    unreachable=0    failed=0
opshift01.example.com      : ok=561  changed=136  unreachable=0    failed=0
opshift02.example.com      : ok=232  changed=63   unreachable=0    failed=0
opshift03.example.com      : ok=232  changed=63   unreachable=0    failed=0

#

after install

# oc login -u system:admin
Logged into "https://opshift01:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

  * default
    kube-public
    kube-system
    logging
    management-infra
    openshift
    openshift-infra

Using project "default".
#

# oc get node
NAME        STATUS    AGE       VERSION
opshift01   Ready     12m       v1.6.1+5115d708d7
opshift02   Ready     12m       v1.6.1+5115d708d7
opshift03   Ready     12m       v1.6.1+5115d708d7
#
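
The node labels defined in the inventory can be checked as well (a sketch; the label output is omitted here):

# oc get nodes --show-labels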

# oc get pods -o wide
NAME                       READY     STATUS    RESTARTS   AGE       IP              NODE
docker-registry-1-g8b13    1/1       Running   0          13m       10.128.0.4      opshift01
registry-console-1-dznp7   1/1       Running   0          10m       10.128.0.5      opshift01
router-1-jqj4n             1/1       Running   0          14m       172.16.10.111   opshift01
# 

# oc get svc -o wide
NAME               CLUSTER-IP       EXTERNAL-IP   PORT(S)                   AGE       SELECTOR
docker-registry    172.30.255.65    <none>        5000/TCP                  14m       docker-registry=default
kubernetes         172.30.0.1       <none>        443/TCP,53/UDP,53/TCP     27m       <none>
registry-console   172.30.181.217   <none>        9000/TCP                  12m       name=registry-console
router             172.30.122.197   <none>        80/TCP,443/TCP,1936/TCP   16m       router=router
#

# oc get routes
NAME               HOST/PORT                                   PATH      SERVICES           PORT      TERMINATION   WILDCARD
docker-registry    docker-registry-default.apps.example.com              docker-registry    <all>     passthrough   None
registry-console   registry-console-default.apps.example.com             registry-console   <all>     passthrough   None
#

check the config file.

# grep -C1 subdomain /etc/origin/master/master-config.yaml
routingConfig:
  subdomain:  "apps.example.com"
serviceAccountConfig:
#

# grep -A8 identityProviders /etc/origin/master/master-config.yaml
  identityProviders:
  - challenge: true
    login: true
    mappingMethod: claim
    name: htpasswd_auth
    provider:
      apiVersion: v1
      file: /etc/origin/master/htpasswd
      kind: HTPasswdPasswordIdentityProvider
#
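
Because the identity provider is HTPasswdPasswordIdentityProvider, a login user still has to be added to the htpasswd file on the master. A minimal sketch (user1 and <password> are just placeholders; htpasswd comes from the httpd-tools package):

# yum install -y httpd-tools
# htpasswd -b /etc/origin/master/htpasswd user1 <password>
# oc login -u user1 https://opshift01:8443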

install elasticsearch-head on CentOS 7.3

ref

install elasticsearch on CentOS 7.3 - AKAI TSUKI

environment

hostname   ip             role
cent7no31  172.16.10.231  Coordinating node, elasticsearch-head

pre install

install bzip2 and lbzip2 so that tar can extract the .tar.bz2 archive downloaded by phantomjs-prebuilt.

# yum install bzip2
# yum install lbzip2

install git and nodejs

# yum install -y git
# yum install -y epel-release
# yum install -y nodejs
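
Just to confirm that Node.js and npm are on the PATH (the versions from EPEL will differ):

# node -v
# npm -v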

install elasticsearch-head

GitHub - mobz/elasticsearch-head: A web front end for an elastic search cluster

install

# git clone git://github.com/mobz/elasticsearch-head.git
# cd elasticsearch-head
# npm install
> phantomjs-prebuilt@2.1.16 install /root/elasticsearch-head/node_modules/phantomjs-prebuilt
> node install.js

PhantomJS not found on PATH
Download already available at /tmp/phantomjs/phantomjs-2.1.1-linux-x86_64.tar.bz2
Verified checksum of previously downloaded file
Extracting tar contents (via spawned process)
Removing /root/elasticsearch-head/node_modules/phantomjs-prebuilt/lib/phantom
Copying extracted folder /tmp/phantomjs/phantomjs-2.1.1-linux-x86_64.tar.bz2-extract-1509856016577/phantomjs-2.1.1-linux-x86_64 -> /root/elasticsearch-head/node_modules/phantomjs-prebuilt/lib/phantom
Writing location.js file
Done. Phantomjs binary available at /root/elasticsearch-head/node_modules/phantomjs-prebuilt/lib/phantom/bin/phantomjs
elasticsearch-head@0.0.0 /root/elasticsearch-head

*snip*

npm WARN optional SKIPPING OPTIONAL DEPENDENCY: fsevents@^1.0.0 (node_modules/chokidar/node_modules/fsevents):
npm WARN notsup SKIPPING OPTIONAL DEPENDENCY: Unsupported platform for fsevents@1.1.2: wanted {"os":"darwin","arch":"any"} (current: {"os":"linux","arch":"x64"})
npm WARN elasticsearch-head@0.0.0 license should be a valid SPDX license expression
#

note

When I executed npm install, an error occurred as follows:

# npm install
npm: relocation error: npm: symbol SSL_set_cert_cb, version libssl.so.10 not defined in file libssl.so.10 with link time reference
#

So I ran the following command and then retried npm install:

# yum update openssl

start elasticsearch-head

[root@cent7no31 ~]# cd elasticsearch-head/
[root@cent7no31 elasticsearch-head]# npm run start

> elasticsearch-head@0.0.0 start /root/elasticsearch-head
> grunt server

Running "connect:server" (connect) task
Waiting forever...
Started connect web server on http://localhost:9100
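
npm run start stays in the foreground. To keep elasticsearch-head running after logging out, it can be started in the background instead (a rough sketch; the log path is arbitrary):

[root@cent7no31 elasticsearch-head]# nohup npm run start > /var/log/elasticsearch-head.log 2>&1 &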

configure elasticsearch

add the following two parameters to elasticsearch.yml:

# vi /etc/elasticsearch/elasticsearch.yml

http.cors.enabled: true
http.cors.allow-origin: "*"

as a result, the file looks like this:

# grep -v "^#" /etc/elasticsearch/elasticsearch.yml | grep -v "^$"
cluster.name: test-cluster
node.name: cent7no31
network.host: 0.0.0.0
discovery.zen.ping.unicast.hosts: ["cent7no11", "cent7no12", "cent7no13", "cent7no31"]
node.master: false
node.data: false
node.ingest: false
http.cors.enabled: true
http.cors.allow-origin: "*"
#

restart elasticsearch

# systemctl restart elasticsearch
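
To confirm that CORS is actually enabled, send a request with an Origin header; the response headers should include Access-Control-Allow-Origin (a sketch):

# curl -i -H "Origin: http://172.16.10.231:9100" http://localhost:9200/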

open elasticsearch-head

Open http://172.16.10.231:9100/ in a browser.

(screenshot)

change the connection URL to elasticsearch

Change the URL from http://localhost:9200/ to http://172.16.10.231:9200/ and click the Connect button.

(screenshot)

check Elasticsearch status

ref

install elasticsearch-head on CentOS 7.3 - AKAI TSUKI
install elasticsearch on CentOS 7.3 - AKAI TSUKI

check with the curl command

[root@cent7no31 ~]# curl -X GET http://localhost:9200/
{
  "name" : "cent7no31",
  "cluster_name" : "test-cluster",
  "cluster_uuid" : "XM0VikGzTve-p7Bb_p81Rw",
  "version" : {
    "number" : "5.4.3",
    "build_hash" : "eed30a8",
    "build_date" : "2017-06-22T00:34:03.743Z",
    "build_snapshot" : false,
    "lucene_version" : "6.5.1"
  },
  "tagline" : "You Know, for Search"
}
[root@cent7no31 ~]#

cat APIs | Elasticsearch Reference [5.6] | Elastic

[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/health
1509877640 19:27:20 test-cluster green 4 3 2 1 0 0 0 0 - 100.0%
[root@cent7no31 ~]#
[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/health?v
epoch      timestamp cluster      status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1509877664 19:27:44  test-cluster green           4         3      2   1    0    0        0             0                  -                100.0%
[root@cent7no31 ~]#
[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/master
OeS8YZ8vTU-DCZgg7xlX2g 172.16.10.213 172.16.10.213 cent7no13
[root@cent7no31 ~]#
[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/master?v
id                     host          ip            node
OeS8YZ8vTU-DCZgg7xlX2g 172.16.10.213 172.16.10.213 cent7no13
[root@cent7no31 ~]#
[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/nodes
172.16.10.213  6 71 0 0.00 0.01 0.05 md * cent7no13
172.16.10.212  7 69 0 0.00 0.01 0.05 md - cent7no12
172.16.10.231  5 38 0 0.00 0.01 0.05 -  - cent7no31
172.16.10.211 10 69 0 0.00 0.01 0.05 md - cent7no11
[root@cent7no31 ~]#
[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/nodes?v
ip            heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
172.16.10.213            6          71   0    0.00    0.01     0.05 md        *      cent7no13
172.16.10.212            7          69   0    0.00    0.01     0.05 md        -      cent7no12
172.16.10.231            5          38   0    0.00    0.01     0.05 -         -      cent7no31
172.16.10.211           10          69   0    0.00    0.01     0.05 md        -      cent7no11
[root@cent7no31 ~]#
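
Other _cat endpoints are handy for the same kind of quick check (output depends on the indices in the cluster):

[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/indices?v
[root@cent7no31 ~]# curl -X GET http://localhost:9200/_cat/shards?v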

install elasticsearch on CentOS 7.3

ref

install logstash on CentOS 7.3 - AKAI TSUKI
install kibana on CentOS 7.3 - AKAI TSUKI

environment

hostname   ip             role
cent7no31  172.16.10.231  Coordinating node
cent7no11  172.16.10.211  Master/Data node
cent7no12  172.16.10.212  Master/Data node
cent7no13  172.16.10.213  Master/Data node

install

install java on all nodes

# yum install -y java-1.8.0-openjdk.x86_64 java-1.8.0-openjdk-devel.x86_64

install elasticsearch on all nodes

# rpm -ivh https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.4.3.rpm

configure

# vi /etc/elasticsearch/elasticsearch.yml

cent7no31

[root@cent7no31 elasticsearch]# grep -v "^#" /etc/elasticsearch/elasticsearch.yml | grep -v "^$"
cluster.name: test-cluster
node.name: cent7no31
network.host: 172.16.10.231
discovery.zen.ping.unicast.hosts: ["cent7no11", "cent7no12", "cent7no13", "cent7no31"]
node.master: false
node.data: false
node.ingest: false
[root@cent7no31 elasticsearch]#

cent7no11

[root@cent7no11 ~]# grep -v "^#" /etc/elasticsearch/elasticsearch.yml | grep -v "^$"
cluster.name: test-cluster
node.name: cent7no11
network.host: 172.16.10.211
discovery.zen.ping.unicast.hosts: ["cent7no11", "cent7no12", "cent7no13", "cent7no31"]
discovery.zen.minimum_master_nodes: 2
node.master: true
node.data: true
node.ingest: false
[root@cent7no11 ~]#

cent7no12

[root@cent7no12 elasticsearch]# grep -v "^#" /etc/elasticsearch/elasticsearch.yml | grep -v "^$"
cluster.name: test-cluster
node.name: cent7no12
network.host: 172.16.10.212
discovery.zen.ping.unicast.hosts: ["cent7no11", "cent7no12", "cent7no13", "cent7no31"]
discovery.zen.minimum_master_nodes: 2
node.master: true
node.data: true
node.ingest: false
[root@cent7no12 elasticsearch]#

cent7no13

[root@cent7no13 elasticsearch]# grep -v "^#" /etc/elasticsearch/elasticsearch.yml | grep -v "^$"
cluster.name: test-cluster
node.name: cent7no13
network.host: 172.16.10.213
discovery.zen.ping.unicast.hosts: ["cent7no11", "cent7no12", "cent7no13", "cent7no31"]
discovery.zen.minimum_master_nodes: 2
node.master: true
node.data: true
node.ingest: false
[root@cent7no13 elasticsearch]#
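
After editing elasticsearch.yml, enable and start the service on every node (the standard steps for the RPM install):

# systemctl daemon-reload
# systemctl enable elasticsearch
# systemctl start elasticsearch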

install logstash on CentOS 7.3

environment

hostname ip
cent7no41 172.16.10.241

install

install java

[root@cent7no41 ~]# yum install -y java-1.8.0-openjdk.x86_64 java-1.8.0-openjdk-devel.x86_64

install logstash

[root@cent7no41 ~]# rpm -ivh https://artifacts.elastic.co/downloads/logstash/logstash-5.4.3.rpm

check status

[root@cent7no41 ~]# systemctl status logstash
● logstash.service - logstash
   Loaded: loaded (/etc/systemd/system/logstash.service; disabled; vendor preset: disabled)
   Active: inactive (dead)
[root@cent7no41 ~]#
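
Logstash is installed but still inactive and has no pipeline yet. As a minimal sketch (the file name, the beats port, and the whole pipeline are just an example), a config under /etc/logstash/conf.d/ could forward beats input to the coordinating node:

[root@cent7no41 ~]# cat /etc/logstash/conf.d/beats-to-es.conf
input {
  beats {
    port => 5044
  }
}
output {
  elasticsearch {
    hosts => ["http://172.16.10.231:9200"]
  }
}
[root@cent7no41 ~]# systemctl start logstash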

install kibana on CentOS 7.3

environment

hostname ip
cent7no31 172.16.10.231

install

install java

[root@cent7no31 ~]# yum install -y java-1.8.0-openjdk.x86_64 java-1.8.0-openjdk-devel.x86_64
*snip*
Dependency Updated:
  nss-softokn.x86_64 0:3.28.3-8.el7_4           nss-softokn-freebl.x86_64 0:3.28.3-8.el7_4

Complete!
[root@cent7no31 ~]#

install kibana

[root@cent7no31 ~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
[root@cent7no31 ~]# rpm -ivh  https://artifacts.elastic.co/downloads/kibana/kibana-5.4.3-x86_64.rpm
Retrieving https://artifacts.elastic.co/downloads/kibana/kibana-5.4.3-x86_64.rpm
Preparing...                          ################################# [100%]
Updating / installing...
   1:kibana-5.4.3-1                   ################################# [100%]
[root@cent7no31 ~]#

configure server.host

[root@cent7no31 ~]# vi /etc/kibana/kibana.yml

#server.host: "localhost"
server.host: "172.16.10.231"
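
Kibana 5.x also has to know where Elasticsearch is; by default it uses http://localhost:9200. Pointing it at the coordinating node in the same file would look like this (a sketch):

elasticsearch.url: "http://172.16.10.231:9200"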

start kibana

[root@cent7no31 ~]# systemctl status kibana
● kibana.service - Kibana
   Loaded: loaded (/etc/systemd/system/kibana.service; disabled; vendor preset: disabled)
   Active: inactive (dead)
[root@cent7no31 ~]# systemctl start kibana
[root@cent7no31 ~]#
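
To have Kibana come up after a reboot and to confirm it is listening (Kibana uses port 5601 by default):

[root@cent7no31 ~]# systemctl enable kibana
[root@cent7no31 ~]# ss -tlnp | grep 5601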

access kibana in a browser
(screenshot)