

Installing Kubernetes 1.3 on CentOS (Part 1)

Source: 程序員人生 | Published 2017-03-20

Deployment topology

Host 117: k8s master + k8s minion + flannel + docker
Host 110: etcd
Host 73:  k8s minion + docker + flannel

Install Docker

Docker requires Linux kernel 3.10 or later. Check the kernel version with:
uname -rs      # show the Linux kernel version

The official installation docs are straightforward; copy and paste the repository definition:
$ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7/
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF

yum install docker-engine     # install the Docker engine
systemctl enable docker.service
systemctl start docker        # start the Docker service
systemctl stop docker         # stop the Docker service
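
As a quick sanity check (not part of the original walkthrough, just a standard verification), confirm the daemon is running and can pull images:

docker version                # both client and server versions should be reported
docker run --rm hello-world   # pulls a tiny test image and runs it once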

Install and start etcd

cd /opt/kubernetes/bin
vi start_etcd.sh
#!/bin/sh
nohup ./etcd --name etcd001 \
  --initial-advertise-peer-urls http://192.168.161.110:2380 \
  --listen-peer-urls http://192.168.161.110:2380 \
  --listen-client-urls http://192.168.161.110:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://192.168.161.110:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster etcd001=http://192.168.161.110:2380 \
  --initial-cluster-state new &
chmod +x start_etcd.sh
./start_etcd.sh

List the etcd cluster members:
./etcdctl member list

Check the cluster health:
./etcdctl cluster-health
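
Optionally, round-trip a throwaway key (the /test/hello key here is purely illustrative) to confirm reads and writes work through the v2 API:

./etcdctl set /test/hello world   # write a test key
./etcdctl get /test/hello         # should print: world
./etcdctl rm /test/hello          # clean up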


Start the k8s master node (host 117)

The master node runs three services: kube-apiserver, kube-controller-manager, and kube-scheduler.

cd /opt/kubernetes/master
[root@linux-117 master]# vi start_k8s_master.sh
#!/bin/sh
nohup ./kube-apiserver \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--cors_allowed_origins=.* \
--etcd_servers=http://192.168.161.110:2379 \
--v=4 --logtostderr=true  \
--log_dir=/opt/kubernetes/logs/k8s/apiserver \
--service-cluster-ip-range=10.10.10.0/24 & 

nohup ./kube-controller-manager \
--master=192.168.161.117:8080 \
--enable-hostpath-provisioner=false \
--v=1 --logtostderr=true \
--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 \
--log_dir=/opt/kubernetes/logs/k8s/controller-manager &  
  
nohup ./kube-scheduler \
--master=192.168.161.117:8080 \
--v=1 --logtostderr=true \
--log_dir=/opt/kubernetes/logs/k8s/scheduler &
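
Once all three processes are up, a quick sanity check against the API server (assuming it is reachable on 192.168.161.117:8080 as configured above) confirms the control plane is healthy:

curl http://192.168.161.117:8080/version                           # API server version info
./kubectl -s http://192.168.161.117:8080 get componentstatuses     # scheduler, controller-manager and etcd status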


The --service-cluster-ip-range flag defines the subnet from which Service cluster IPs are allocated; the my-nginx-serv Service deployed below gets its internal IP 10.10.10.112 from this range. The --cluster-cidr flag (10.1.0.0/16) is the pod network, which flannel manages later in this guide.

[root@linux-117 master]# ./kubectl get service my-nginx-serv
NAME            CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
my-nginx-serv   10.10.10.112   <nodes>       8080/TCP   4d

Start the k8s minion nodes

Each minion node runs two services: kubelet and kube-proxy. Note that Kubernetes 1.3 uses kubelet; the old kubecfg binary is gone.
cd /opt/kubernetes/bin

vi k8s.minion.sh 
#! /bin/sh  
# start the minion  
nohup ./kubelet --address=0.0.0.0 \
--port=10250 \
--v=1 \
--log_dir=/opt/kubernetes/logs/k8s/kubelet \
--hostname_override=192.168.161.73 \
--container-runtime=docker \
--api_servers=http://192.168.161.117:8080 \
--logtostderr=false >> kubelet.log 2>&1 &
  
nohup ./kube-proxy \
--master=192.168.161.117:8080 \
--log_dir=/opt/kubernetes/logs/k8s/proxy  \
--v=1 --logtostderr=false >> proxy.log 2>&1 &


chmod +x k8s.minion.sh
./k8s.minion.sh
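
With kubelet and kube-proxy running, the minions should register themselves with the master; on host 117 you can verify this (a standard check, not in the original text) with:

./kubectl -s http://192.168.161.117:8080 get nodes   # should list 192.168.161.73 and 192.168.161.117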

Next, start flanneld on each minion host; it connects to etcd and registers with the flannel server on 192.168.161.110:8888 (started in the next section).

/*** run on host 1 ***/

nohup ./flanneld -etcd-endpoints=http://192.168.161.110:2379 -remote=192.168.161.110:8888 >> /opt/kubernetes/flannel.log 2>&1 &
source /run/flannel/subnet.env

/*** run on host 2 ***/

nohup ./flanneld -etcd-endpoints=http://192.168.161.110:2379 -remote=192.168.161.110:8888 >> /opt/kubernetes/flannel.log 2>&1 &
source /run/flannel/subnet.env

Configure the flannel network

/*** run on the etcd/flannel server, host 110 ***/
cd /opt/kubernetes/bin
nohup ./flanneld --listen=0.0.0.0:8888 >> /opt/kubernetes/logs/flanneld.log 2>&1 &
/** set the overlay network range on the etcd server */
etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16" }'
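
To confirm the key was written, and later to see which per-host /24 leases flannel has handed out, query etcd directly (flannel keeps its state under /coreos.com/network):

etcdctl get /coreos.com/network/config      # should print the JSON set above
etcdctl ls /coreos.com/network/subnets      # per-host subnet leases, populated once flanneld clients connect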

Configure the Docker subnet and restart the minion services

source /run/flannel/subnet.env 
ifconfig docker0 ${FLANNEL_SUBNET}
setsid docker daemon --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}  --insecure-registry=192.168.161.117:5000 --registry-mirror=https://0ai1grsq.mirror.aliyuncs.com >docker.log 2>&1&
./k8s.minion.sh  # restart kubelet and kube-proxy
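
The ${FLANNEL_SUBNET} and ${FLANNEL_MTU} values come from the environment file flanneld writes; if docker0 ends up with the wrong address, inspect that file directly:

cat /run/flannel/subnet.env   # defines FLANNEL_NETWORK, FLANNEL_SUBNET, FLANNEL_MTU, FLANNEL_IPMASQ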

After startup, check the subnet on each minion host: host 73 was assigned the 10.1.59.0/24 subnet and host 117 the 10.1.83.0/24 subnet. Both are allocated out of the 10.1.0.0/16 network configured above with:

etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16" }'

Check the subnet configuration on host 73:

[root@linux-73 ~]# ifconfig -a
docker0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1472
        inet 10.1.59.1 netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::42:75ff:fe18:4dd  prefixlen 64  scopeid 0x20<link>
        ether 02:42:75:18:04:dd  txqueuelen 0  (Ethernet)
        RX packets 1623171  bytes 625325902 (596.3 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 1518744  bytes 474482055 (452.5 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 10.1.59.0 netmask 255.255.0.0  destination 10.1.59.0
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 879524  bytes 61983130 (59.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 895098  bytes 530931772 (506.3 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Check the subnet configuration on host 117:

[root@linux-5f117 ~]# ifconfig -a
docker0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1472
        inet 10.1.83.1 netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::42:aeff:fe27:e5d1  prefixlen 64  scopeid 0x20<link>
        ether 02:42:ae:27:e5:d1  txqueuelen 0  (Ethernet)
        RX packets 1929341  bytes 1078499314 (1.0 GiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 1851177  bytes 1376911611 (1.2 GiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 10.1.83.0  netmask 255.255.0.0  destination 10.1.83.0
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 880550  bytes 521809015 (497.6 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 884839  bytes 61525762 (58.6 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Deploy the Kubernetes dashboard

[root@linux-5f117 master]# more kubernetes-dashboard.yaml
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI.
#
# Example usage: kubectl create -f <this_file>

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kubernetes-dashboard
  template:
    metadata:
      labels:
        app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: designer9418/kubernetes-dashboard-amd64
        imagePullPolicy: Always
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
           - --apiserver-host=http://192.168.161.117:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 9090
  selector:
    app: kubernetes-dashboard

kubectl create -f kubernetes-dashboard.yaml

Open http://192.168.161.117:8080/ui in a browser to access the dashboard.
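
If the UI does not load, check that the dashboard pod is running and look up the NodePort assigned to its Service (names and namespace as defined in the YAML above):

./kubectl get pods --namespace=kube-system -o wide                   # the dashboard pod should be Running
./kubectl get service kubernetes-dashboard --namespace=kube-system   # shows the NodePort mapped to port 80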

Firewall

On CentOS 7:

Check the firewall status:
systemctl status firewalld

Temporarily stop the firewall (it starts again automatically after a reboot):
systemctl stop firewalld

Permanently disable the firewall (it will not start after a reboot):
systemctl disable firewalld
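
Disabling firewalld entirely keeps a lab setup simple; on a shared machine you could instead open only the ports this deployment uses (a sketch based on the commands above, adjust to your hosts):

firewall-cmd --permanent --add-port=8080/tcp        # kube-apiserver insecure port
firewall-cmd --permanent --add-port=2379-2380/tcp   # etcd client and peer traffic
firewall-cmd --permanent --add-port=10250/tcp       # kubelet
firewall-cmd --permanent --add-port=8888/tcp        # flannel server
firewall-cmd --reload
# flannel's UDP backend also tunnels traffic between hosts (port 8285/udp by default)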


Deploying Pods and Services with k8s is covered in the next part >>>

