Kubernetes Cluster Deployment
Source: cnblogs · Author: 艾艾贴 · Date: 2019/1/11 10:11:29

Installation environment

172.19.2.49(kube-apiserver,kube-controller-manager,kube-dns,kube-proxy,kubectl,etcd)

172.19.2.50(kubectl,etcd,kube-proxy)

172.19.2.51(kubectl,etcd,kube-proxy)

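Since the steps below copy files between these hosts repeatedly with scp, it can help to distribute an SSH key first. This is an optional convenience sketch (the `app` user is the account the guide copies files to):

ssh-keygen -t rsa -b 2048 -f ~/.ssh/id_rsa -N ''
# push the key to the two worker hosts so later scp commands do not prompt
for ip in 172.19.2.50 172.19.2.51; do
  ssh-copy-id app@${ip}
done
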
1. Create the CA certificate and keys, and set up environment variables in advance

Run on 172.19.2.49:

mkdir -pv /root/local/bin
vim /root/local/bin/environment.sh

#!/usr/bin/bash
export PATH=/root/local/bin:$PATH
# Token used for TLS bootstrapping; generate one with:
#   head -c 16 /dev/urandom | od -An -t x | tr -d ' '
BOOTSTRAP_TOKEN="11d74483444fb57f6a1cc114ed715949"
# Preferably pick subnets unused on the hosts for the service and pod networks.
# Service CIDR: unroutable before deployment; reachable inside the cluster as IP:Port afterwards
SERVICE_CIDR="10.254.0.0/16"
# Pod CIDR (cluster CIDR): unroutable before deployment; routable afterwards (guaranteed by flanneld)
CLUSTER_CIDR="172.30.0.0/16"
# NodePort range for services
export NODE_PORT_RANGE="8400-9000"
# etcd cluster client endpoints
export ETCD_ENDPOINTS="https://172.19.2.49:2379,https://172.19.2.50:2379,https://172.19.2.51:2379"
# etcd key prefix for the flanneld network configuration
export FLANNEL_ETCD_PREFIX="/kubernetes/network"
# kubernetes service IP (usually the first IP in SERVICE_CIDR)
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
export CLUSTER_DNS_SVC_IP="10.254.0.2"
# cluster DNS domain
export CLUSTER_DNS_DOMAIN="cluster.local."
# name of the machine being deployed (any value, as long as it distinguishes the machines)
export NODE_NAME=etcd-host0
# IP of the machine being deployed
export NODE_IP=172.19.2.49
# IPs of all machines in the etcd cluster
export NODE_IPS="172.19.2.49 172.19.2.50 172.19.2.51"
# IPs and ports used for etcd inter-cluster communication
export ETCD_NODES=etcd-host0=https://172.19.2.49:2380,etcd-host1=https://172.19.2.50:2380,etcd-host2=https://172.19.2.51:2380
# replace with the IP of any kubernetes master machine
export MASTER_IP=172.19.2.49
export KUBE_APISERVER="https://${MASTER_IP}:6443"

scp /root/local/bin/environment.sh app@172.19.2.50:/home/app
scp /root/local/bin/environment.sh app@172.19.2.51:/home/app
 
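The per-host copies below differ only in NODE_NAME and NODE_IP. As an optional sketch, those two lines could be patched per host with sed instead of being edited by hand:

declare -A NAMES=( [172.19.2.50]=etcd-host1 [172.19.2.51]=etcd-host2 )
for ip in "${!NAMES[@]}"; do
  # rewrite only NODE_NAME and NODE_IP; everything else is identical
  sed -e "s|^export NODE_NAME=.*|export NODE_NAME=${NAMES[$ip]}|" \
      -e "s|^export NODE_IP=.*|export NODE_IP=${ip}|" \
      /root/local/bin/environment.sh > /tmp/environment-${ip}.sh
  scp /tmp/environment-${ip}.sh app@${ip}:/home/app/environment.sh
done
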

Environment variable configuration on 172.19.2.50:

vim /home/app/environment.sh

#!/usr/bin/bash
export PATH=/root/local/bin:$PATH
# Token used for TLS bootstrapping; generate one with:
#   head -c 16 /dev/urandom | od -An -t x | tr -d ' '
BOOTSTRAP_TOKEN="11d74483444fb57f6a1cc114ed715949"
# Preferably pick subnets unused on the hosts for the service and pod networks.
# Service CIDR: unroutable before deployment; reachable inside the cluster as IP:Port afterwards
SERVICE_CIDR="10.254.0.0/16"
# Pod CIDR (cluster CIDR): unroutable before deployment; routable afterwards (guaranteed by flanneld)
CLUSTER_CIDR="172.30.0.0/16"
# NodePort range for services
export NODE_PORT_RANGE="8400-9000"
# etcd cluster client endpoints
export ETCD_ENDPOINTS="https://172.19.2.49:2379,https://172.19.2.50:2379,https://172.19.2.51:2379"
# etcd key prefix for the flanneld network configuration
export FLANNEL_ETCD_PREFIX="/kubernetes/network"
# kubernetes service IP (usually the first IP in SERVICE_CIDR)
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
export CLUSTER_DNS_SVC_IP="10.254.0.2"
# cluster DNS domain
export CLUSTER_DNS_DOMAIN="cluster.local."
# name of the machine being deployed (any value, as long as it distinguishes the machines)
export NODE_NAME=etcd-host1
# IP of the machine being deployed
export NODE_IP=172.19.2.50
# IPs of all machines in the etcd cluster
export NODE_IPS="172.19.2.49 172.19.2.50 172.19.2.51"
# IPs and ports used for etcd inter-cluster communication
export ETCD_NODES=etcd-host0=https://172.19.2.49:2380,etcd-host1=https://172.19.2.50:2380,etcd-host2=https://172.19.2.51:2380
# replace with the IP of any kubernetes master machine
export MASTER_IP=172.19.2.49
export KUBE_APISERVER="https://${MASTER_IP}:6443"
 

Environment variable configuration on 172.19.2.51:

vim /home/app/environment.sh

#!/usr/bin/bash
export PATH=/root/local/bin:$PATH
# Token used for TLS bootstrapping; generate one with:
#   head -c 16 /dev/urandom | od -An -t x | tr -d ' '
BOOTSTRAP_TOKEN="11d74483444fb57f6a1cc114ed715949"
# Preferably pick subnets unused on the hosts for the service and pod networks.
# Service CIDR: unroutable before deployment; reachable inside the cluster as IP:Port afterwards
SERVICE_CIDR="10.254.0.0/16"
# Pod CIDR (cluster CIDR): unroutable before deployment; routable afterwards (guaranteed by flanneld)
CLUSTER_CIDR="172.30.0.0/16"
# NodePort range for services
export NODE_PORT_RANGE="8400-9000"
# etcd cluster client endpoints
export ETCD_ENDPOINTS="https://172.19.2.49:2379,https://172.19.2.50:2379,https://172.19.2.51:2379"
# etcd key prefix for the flanneld network configuration
export FLANNEL_ETCD_PREFIX="/kubernetes/network"
# kubernetes service IP (usually the first IP in SERVICE_CIDR)
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
export CLUSTER_DNS_SVC_IP="10.254.0.2"
# cluster DNS domain
export CLUSTER_DNS_DOMAIN="cluster.local."
# name of the machine being deployed (any value, as long as it distinguishes the machines)
export NODE_NAME=etcd-host2
# IP of the machine being deployed
export NODE_IP=172.19.2.51
# IPs of all machines in the etcd cluster
export NODE_IPS="172.19.2.49 172.19.2.50 172.19.2.51"
# IPs and ports used for etcd inter-cluster communication
export ETCD_NODES=etcd-host0=https://172.19.2.49:2380,etcd-host1=https://172.19.2.50:2380,etcd-host2=https://172.19.2.51:2380
# replace with the IP of any kubernetes master machine
export MASTER_IP=172.19.2.49
export KUBE_APISERVER="https://${MASTER_IP}:6443"
 

Run on all three machines (172.19.2.49, 172.19.2.50, 172.19.2.51):

mv /home/app/environment.sh /root/local/bin/
chown root:root /root/local/bin/environment.sh
chmod 777 /root/local/bin/environment.sh
source /root/local/bin/environment.sh
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
cp cfssl_linux-amd64 /root/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
cp cfssljson_linux-amd64 /root/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
cp cfssl-certinfo_linux-amd64 /root/local/bin/cfssl-certinfo
export PATH=/root/local/bin:$PATH
 
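A quick sanity check that the binaries landed on PATH and are executable (an optional step):

cfssl version
which cfssljson cfssl-certinfo
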

Run on 172.19.2.49:

mkdir ssl
cd ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
mkdir -pv /etc/kubernetes/ssl
cp ca* /etc/kubernetes/ssl
scp ca* root@172.19.2.50:/home/app/ca
scp ca* root@172.19.2.51:/home/app/ca
 
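To inspect the CA certificate that was just generated, the cfssl-certinfo binary installed above can be used (an optional verification):

cfssl-certinfo -cert /etc/kubernetes/ssl/ca.pem
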

Run on 172.19.2.50 and 172.19.2.51:

chown -R root:root /home/app/ca
mkdir -pv /etc/kubernetes/ssl
cp /home/app/ca/ca* /etc/kubernetes/ssl
 

2. Deploy a highly available etcd cluster

Run on all three machines (172.19.2.49, 172.19.2.50, 172.19.2.51):

source /root/local/bin/environment.sh
wget https://github.com/coreos/etcd/releases/download/v3.1.6/etcd-v3.1.6-linux-amd64.tar.gz
tar -xvf etcd-v3.1.6-linux-amd64.tar.gz
cp etcd-v3.1.6-linux-amd64/etcd* /root/local/bin
cat > etcd-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "${NODE_IP}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
export PATH=/root/local/bin:$PATH
source /root/local/bin/environment.sh
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
ls etcd*
etcd.csr  etcd-csr.json  etcd-key.pem  etcd.pem
mkdir -p /etc/etcd/ssl
mv etcd*.pem /etc/etcd/ssl
rm etcd.csr etcd-csr.json
mkdir -p /var/lib/etcd
cat > etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/root/local/bin/etcd \\
  --name=${NODE_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://${NODE_IP}:2380 \\
  --listen-peer-urls=https://${NODE_IP}:2380 \\
  --listen-client-urls=https://${NODE_IP}:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls=https://${NODE_IP}:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=${ETCD_NODES} \\
  --initial-cluster-state=new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
mv etcd.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
 

Verify the cluster on 172.19.2.49:

for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 /root/local/bin/etcdctl \
    --endpoints=https://${ip}:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/etcd/ssl/etcd.pem \
    --key=/etc/etcd/ssl/etcd-key.pem \
    endpoint health
done
 
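As a further optional check, the cluster membership can be listed through the same credentials:

ETCDCTL_API=3 /root/local/bin/etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --cacert=/etc/kubernetes/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  member list
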

3. Deploy the kubectl command-line tool

Run on all three machines (172.19.2.49, 172.19.2.50, 172.19.2.51):

vim /root/local/bin/environment.sh
# replace with the IP of the kubernetes master machine
export MASTER_IP=172.19.2.49
export KUBE_APISERVER="https://${MASTER_IP}:6443"
source /root/local/bin/environment.sh
 

Run on 172.19.2.49:

wget https://dl.k8s.io/v1.6.2/kubernetes-client-linux-amd64.tar.gz
tar -xzvf kubernetes-client-linux-amd64.tar.gz
cp kubernetes/client/bin/kube* /root/local/bin/
chmod a+x /root/local/bin/kube*
cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*
admin.csr  admin-csr.json  admin-key.pem  admin.pem
mv admin*.pem /etc/kubernetes/ssl/
rm admin.csr admin-csr.json
# set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER}
# set client credentials
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem
# set the context
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin
# set the default context
kubectl config use-context kubernetes
cat ~/.kube/config
 

Run on 172.19.2.50 and 172.19.2.51 (the two scp commands are issued from 172.19.2.49):

# issued from 172.19.2.49:
scp kubernetes-client-linux-amd64.tar.gz app@172.19.2.50:/home/app
scp kubernetes-client-linux-amd64.tar.gz app@172.19.2.51:/home/app
# then on each of the two nodes:
mv /home/app/kubernetes-client-linux-amd64.tar.gz /home/lvqingshan
chown root:root kubernetes-client-linux-amd64.tar.gz
tar -xzvf kubernetes-client-linux-amd64.tar.gz
cp kubernetes/client/bin/kube* /root/local/bin/
chmod a+x /root/local/bin/kube*
mkdir ~/.kube/
 

Run on 172.19.2.49:

scp ~/.kube/config root@172.19.2.50:/home/app
scp ~/.kube/config root@172.19.2.51:/home/app
 

Run on 172.19.2.50 and 172.19.2.51:

mv /home/app/config ~/.kube/
chown root:root ~/.kube/config
 

4. Deploy the Flannel network

Run on 172.19.2.49:

cat > flanneld-csr.json << EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
ls flanneld*
flanneld.csr  flanneld-csr.json  flanneld-key.pem  flanneld.pem
scp flanneld* root@172.19.2.50:/home/app/
scp flanneld* root@172.19.2.51:/home/app/
 

Run on 172.19.2.50 and 172.19.2.51:

mv /home/app/flanneld* .
 

Run on all three machines (172.19.2.49, 172.19.2.50, 172.19.2.51):

mkdir -p /etc/flanneld/ssl
mv flanneld*.pem /etc/flanneld/ssl
rm flanneld.csr flanneld-csr.json
 

Write the flannel network configuration into etcd once, on 172.19.2.49 only (the first etcdctl command below must not be repeated). The flanneld download, service file, and start/verify steps that follow are then performed on each node, which is why three subnets appear in the listing further down.

/root/local/bin/etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  set ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
mkdir flannel
wget https://github.com/coreos/flannel/releases/download/v0.7.1/flannel-v0.7.1-linux-amd64.tar.gz
tar -xzvf flannel-v0.7.1-linux-amd64.tar.gz -C flannel
cp flannel/{flanneld,mk-docker-opts.sh} /root/local/bin
cat > flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/root/local/bin/flanneld \\
  -etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
  -etcd-certfile=/etc/flanneld/ssl/flanneld.pem \\
  -etcd-keyfile=/etc/flanneld/ssl/flanneld-key.pem \\
  -etcd-endpoints=${ETCD_ENDPOINTS} \\
  -etcd-prefix=${FLANNEL_ETCD_PREFIX}
ExecStartPost=/root/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
cp flanneld.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable flanneld
systemctl start flanneld
systemctl status flanneld
journalctl -u flanneld | grep 'Lease acquired'
ifconfig flannel.1
# check the cluster pod CIDR (/16)
/root/local/bin/etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  get ${FLANNEL_ETCD_PREFIX}/config
Expected output:
{"Network":"172.30.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}
# list the allocated pod subnets (/24)
/root/local/bin/etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  ls ${FLANNEL_ETCD_PREFIX}/subnets
Expected output:
/kubernetes/network/subnets/172.30.27.0-24
# show the flanneld listen IP and network parameters for one pod subnet
/root/local/bin/etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  get ${FLANNEL_ETCD_PREFIX}/subnets/172.30.27.0-24
Expected output:
{"PublicIP":"172.19.2.49","BackendType":"vxlan","BackendData":{"VtepMAC":"9a:7b:7e:6a:2e:0b"}}
 

Run on 172.19.2.49:

/root/local/bin/etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  ls ${FLANNEL_ETCD_PREFIX}/subnets
 

Expected output:

/kubernetes/network/subnets/172.30.27.0-24
/kubernetes/network/subnets/172.30.22.0-24
/kubernetes/network/subnets/172.30.38.0-24
 

Ping each of the following addresses from the nodes; note that a host cannot ping its own flannel gateway this way. A loop version is sketched after the list.

172.30.27.1
172.30.22.1
172.30.38.1
 
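An illustrative loop for this check, using the gateway addresses allocated in this deployment:

# ping each flannel gateway; report failures without aborting the loop
for gw in 172.30.27.1 172.30.22.1 172.30.38.1; do
  ping -c 2 -W 1 ${gw} >/dev/null && echo "${gw} OK" || echo "${gw} unreachable"
done
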

5. Deploy the master node

The Kubernetes master node runs these components: kube-apiserver, kube-scheduler, kube-controller-manager.

Run on 172.19.2.49:

wget https://github.com/kubernetes/kubernetes/releases/download/v1.6.2/kubernetes.tar.gz
tar -xzvf kubernetes.tar.gz
cd kubernetes
./cluster/get-kube-binaries.sh
wget https://dl.k8s.io/v1.6.2/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
tar -xzvf kubernetes-src.tar.gz
cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /root/local/bin/
cd ../..
cat > kubernetes-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "${MASTER_IP}",
    "${CLUSTER_KUBERNETES_SVC_IP}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
ls kubernetes*
kubernetes.csr  kubernetes-csr.json  kubernetes-key.pem  kubernetes.pem
mkdir -p /etc/kubernetes/ssl/
mv kubernetes*.pem /etc/kubernetes/ssl/
rm kubernetes.csr kubernetes-csr.json
cat > token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
mv token.csv /etc/kubernetes/
cat > kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/root/local/bin/kube-apiserver \\
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --advertise-address=${MASTER_IP} \\
  --bind-address=${MASTER_IP} \\
  --insecure-bind-address=${MASTER_IP} \\
  --authorization-mode=RBAC \\
  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \\
  --kubelet-https=true \\
  --experimental-bootstrap-token-auth \\
  --token-auth-file=/etc/kubernetes/token.csv \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --service-node-port-range=${NODE_PORT_RANGE} \\
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \\
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --etcd-servers=${ETCD_ENDPOINTS} \\
  --enable-swagger-ui=true \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/lib/audit.log \\
  --event-ttl=1h \\
  --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cp kube-apiserver.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
cat > kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/root/local/bin/kube-controller-manager \\
  --address=127.0.0.1 \\
  --master=http://${MASTER_IP}:8080 \\
  --allocate-node-cidrs=true \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --cluster-cidr=${CLUSTER_CIDR} \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --leader-elect=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
cp kube-controller-manager.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
cat > kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/root/local/bin/kube-scheduler \\
  --address=127.0.0.1 \\
  --master=http://${MASTER_IP}:8080 \\
  --leader-elect=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
cp kube-scheduler.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
 

Check the health of the cluster components:

kubectl get componentstatuses
 
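For reference, `cs` is the registered short name for this resource, so the check can be abbreviated:

kubectl get cs
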

6. Deploy the node components

Each Kubernetes node runs the following components: flanneld, docker, kubelet, kube-proxy.

Run on all three machines (172.19.2.49, 172.19.2.50, 172.19.2.51):

# install docker
yum install docker-ce
rpm -qa | grep docker
docker-ce-selinux-17.03.1.ce-1.el7.centos.noarch
docker-ce-17.03.1.ce-1.el7.centos.x86_64
# edit the docker unit file and add the flannel environment file line
vim /etc/systemd/system/docker.service
[Service]
Type=notify
Environment=GOTRACEBACK=crash
EnvironmentFile=-/run/flannel/docker
systemctl daemon-reload
systemctl enable docker
systemctl start docker
docker version
# cluster-scoped binding; it only needs to succeed once (run it on the master)
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
wget https://dl.k8s.io/v1.6.2/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
tar -xzvf kubernetes-src.tar.gz
cp -r ./server/bin/{kube-proxy,kubelet} /root/local/bin/
# set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# set client credentials
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
mv bootstrap.kubeconfig /etc/kubernetes/
mkdir /var/lib/kubelet
cat > kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/root/local/bin/kubelet \\
  --address=${NODE_IP} \\
  --hostname-override=${NODE_IP} \\
  --pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest \\
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
  --require-kubeconfig \\
  --cert-dir=/etc/kubernetes/ssl \\
  --cluster_dns=${CLUSTER_DNS_SVC_IP} \\
  --cluster_domain=${CLUSTER_DNS_DOMAIN} \\
  --hairpin-mode promiscuous-bridge \\
  --allow-privileged=true \\
  --serialize-image-pulls=false \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
cp kubelet.service /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
journalctl -u kubelet
 

Run on 172.19.2.49:

kubectl get csr
# note: the argument to approve is the NAME of the kubelet's CSR; approving it
# admits the node into the cluster (a bulk-approve sketch follows this block)
kubectl certificate approve csr-1w6sj
kubectl get csr
kubectl get nodes
ls -l /etc/kubernetes/kubelet.kubeconfig
ls -l /etc/kubernetes/ssl/kubelet*
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*
cp kube-proxy*.pem /etc/kubernetes/ssl/
rm kube-proxy.csr kube-proxy-csr.json
scp kube-proxy*.pem root@172.19.2.50:/home/lvqingshan
scp kube-proxy*.pem root@172.19.2.51:/home/lvqingshan
 
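A sketch for approving all pending CSRs at once, assuming the kubectl 1.6 table output where the CONDITION column reads Pending:

kubectl get csr | grep -w Pending | awk '{print $1}' | xargs -r kubectl certificate approve
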

Run on 172.19.2.50 and 172.19.2.51:

mv /home/lvqingshan/kube-proxy*.pem /etc/kubernetes/ssl/
 

Run on all three machines (172.19.2.49, 172.19.2.50, 172.19.2.51):

# set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
# set client credentials
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
# set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
# set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
mv kube-proxy.kubeconfig /etc/kubernetes/
mkdir -p /var/lib/kube-proxy
cat > kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/root/local/bin/kube-proxy \\
  --bind-address=${NODE_IP} \\
  --hostname-override=${NODE_IP} \\
  --cluster-cidr=${SERVICE_CIDR} \\
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cp kube-proxy.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
 

Run on 172.19.2.49:

cat > nginx-ds.yml << EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  labels:
    app: nginx-ds
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
EOF
kubectl create -f nginx-ds.yml
kubectl get nodes
kubectl get pods -o wide | grep nginx-ds
nginx-ds-4cbd3   1/1       Running   0          1m        172.17.0.2   172.19.2.51
nginx-ds-f0217   1/1       Running   0          1m        172.17.0.2   172.19.2.50
kubectl get svc | grep nginx-ds
nginx-ds     10.254.194.173   <nodes>       80:8542/TCP   1m
curl 10.254.194.173
 
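An additional illustrative check, using the NodePort 8542 from the sample output above: the service should answer on every node IP, not just the cluster IP:

for ip in 172.19.2.49 172.19.2.50 172.19.2.51; do
  # print the HTTP status code returned by nginx through the NodePort
  curl -s -o /dev/null -w "${ip}: %{http_code}\n" http://${ip}:8542/
done
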

7. Deploy the DNS add-on

Run on 172.19.2.49:

mkdir -pv /home/lvqingshan/dns
cd /home/lvqingshan/dns
wget https://github.com/opsnull/follow-me-install-kubernetes-cluster/raw/master/manifests/kubedns/kubedns-cm.yaml
wget https://github.com/opsnull/follow-me-install-kubernetes-cluster/raw/master/manifests/kubedns/kubedns-controller.yaml
wget https://github.com/opsnull/follow-me-install-kubernetes-cluster/raw/master/manifests/kubedns/kubedns-sa.yaml
wget https://github.com/opsnull/follow-me-install-kubernetes-cluster/raw/master/manifests/kubedns/kubedns-svc.yaml
kubectl get clusterrolebindings system:kube-dns -o yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  creationTimestamp: 2017-05-23T07:18:07Z
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-dns
  resourceVersion: "56"
  selfLink: /apis/rbac.authorization.k8s.io/v1beta1/clusterrolebindings/system%3Akube-dns
  uid: f8284130-3f87-11e7-964f-005056bfceaa
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-dns
subjects:
- kind: ServiceAccount
  name: kube-dns
  namespace: kube-system
pwd
/home/lvqingshan/dns
ls *
kubedns-cm.yaml  kubedns-controller.yaml  kubedns-sa.yaml  kubedns-svc.yaml
kubectl create -f .
cat > my-nginx.yaml << EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 2
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
EOF
kubectl create -f my-nginx.yaml
kubectl expose deploy my-nginx
kubectl get services --all-namespaces | grep my-nginx
default       my-nginx     10.254.57.162   <none>        80/TCP          14s
cat > pod-nginx.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:1.7.9
    ports:
    - containerPort: 80
EOF
kubectl create -f pod-nginx.yaml
kubectl exec -it nginx -- /bin/bash
# inside the container:
cat /etc/resolv.conf
exit
 
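To confirm that kube-dns actually resolves service names, a hedged check (assuming the busybox image can be pulled on the nodes):

# resolve the my-nginx service created above through the cluster DNS
kubectl run dns-test --rm -it --restart=Never --image=busybox -- nslookup my-nginx
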

8. Deploy the Dashboard add-on

Run on 172.19.2.49:

mkdir dashboard
cd dashboard
wget https://github.com/opsnull/follow-me-install-kubernetes-cluster/raw/master/manifests/dashboard/dashboard-controller.yaml
wget https://github.com/opsnull/follow-me-install-kubernetes-cluster/raw/master/manifests/dashboard/dashboard-rbac.yaml
wget https://github.com/opsnull/follow-me-install-kubernetes-cluster/raw/master/manifests/dashboard/dashboard-service.yaml
pwd
/home/lvqingshan/dashboard
ls *.yaml
dashboard-controller.yaml  dashboard-rbac.yaml  dashboard-service.yaml
# add the apiserver address to the container args
vim dashboard-controller.yaml
        ports:
        - containerPort: 9090
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
        args:
        - --apiserver-host=http://172.19.2.49:8080
# add a nodePort
vim dashboard-service.yaml
spec:
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090
    nodePort: 8484
kubectl create -f .
kubectl get services kubernetes-dashboard -n kube-system
NAME                   CLUSTER-IP      EXTERNAL-IP   PORT(S)       AGE
kubernetes-dashboard   10.254.86.190   <nodes>       80:8861/TCP   21s
kubectl get deployment kubernetes-dashboard -n kube-system
NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kubernetes-dashboard   1         1         1            1           57s
kubectl get pods -n kube-system | grep dashboard
kubernetes-dashboard-3677875397-8lhp3   1/1       Running   0          1m
# access the dashboard through kubectl proxy
kubectl proxy --address='172.19.2.49' --port=8086 --accept-hosts='^*$'
kubectl cluster-info
Kubernetes master is running at https://172.19.2.49:6443
KubeDNS is running at https://172.19.2.49:6443/api/v1/proxy/namespaces/kube-system/services/kube-dns
kubernetes-dashboard is running at https://172.19.2.49:6443/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard
# access the dashboard through kube-apiserver; open the following URL in a browser:
http://172.19.2.49:8080/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard
 
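The dashboard should also answer directly on the service NodePort (8861 in the sample output above); an illustrative check:

curl -s -o /dev/null -w "%{http_code}\n" http://172.19.2.49:8861/
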

9. Deploy the Harbor private registry

Run on 172.19.2.49:

source /root/local/bin/environment.sh
cd /root/harbor
cat > harbor-csr.json << EOF
{
  "CN": "harbor",
  "hosts": [
    "$NODE_IP"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes harbor-csr.json | cfssljson -bare harbor
ls harbor*
harbor.csr  harbor-csr.json  harbor-key.pem  harbor.pem
mkdir -p /etc/harbor/ssl
mv harbor*.pem /etc/harbor/ssl
rm harbor.csr harbor-csr.json
export PATH=/usr/local/bin/:$PATH
mkdir -p /etc/docker/certs.d/172.19.2.49
cp /etc/kubernetes/ssl/ca.pem /etc/docker/certs.d/172.19.2.49/ca.crt
vim /etc/sysconfig/docker
OPTIONS='--selinux-enabled --insecure-registry=172.19.2.49'
yum install ca-certificates
vim /root/harbor/harbor.cfg
hostname = 172.19.2.49
ui_url_protocol = https
ssl_cert = /etc/harbor/ssl/harbor.pem
ssl_cert_key = /etc/harbor/ssl/harbor-key.pem
# account and password for logging in to Harbor
harbor_admin_password = Harbor123456
./install.sh
# stop / start
docker-compose down -v
docker-compose up -d
# log in to 172.19.2.49 with account admin, password Harbor123456
# registry URL (open in a browser):
#   https://172.19.2.49/
# push a local image to the private registry
docker login 172.19.2.49
docker tag quay.io/pires/docker-elasticsearch-kubernetes:5.4.0 172.19.2.49/library/docker-elasticsearch-kubernetes:5.4
docker push 172.19.2.49/library/docker-elasticsearch-kubernetes:5.4
 
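Once the other two nodes have the CA certificate and are logged in (the steps below), an illustrative pull of the pushed image from 172.19.2.50 or 172.19.2.51:

docker pull 172.19.2.49/library/docker-elasticsearch-kubernetes:5.4
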

Run on 172.19.2.50 and 172.19.2.51:

mkdir -pv /etc/docker/certs.d/172.19.2.49/
vim /etc/sysconfig/docker
OPTIONS='--selinux-enabled --insecure-registry=172.19.2.49'
yum install ca-certificates
 

Copy the certificate from 172.19.2.49:

scp /etc/docker/certs.d/172.19.2.49/ca.crt root@172.19.2.50:/etc/docker/certs.d/172.19.2.49/
scp /etc/docker/certs.d/172.19.2.49/ca.crt root@172.19.2.51:/etc/docker/certs.d/172.19.2.49/
 

Log in to the Harbor registry from 172.19.2.50 and 172.19.2.51: docker login 172.19.2.49

10. Miscellaneous tips

I previously deployed the cluster network with Calico; to remove the tunl0 interface that a Calico installation leaves behind:

modprobe -r ipip
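A quick way to confirm the interface is gone (an optional check):

ip link show tunl0   # should report that the device does not exist once the ipip module is unloaded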
