cd /data/tools/kubernetes/
yum -y install docker-ce-17.09.1.ce-1.el7.centos.x86_64.rpm
Check Docker's default storage location
docker info | grep "Docker Root Dir"
docker info | grep "Storage Driver"
Change Docker's default storage location (on startup, the docker service loads drop-in config files from docker.service.d)
mkdir -pv /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
(heredoc body lost in the original post)
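A minimal sketch of what the drop-in might contain, assuming the data directory is being moved to /data/docker (the target path is an assumption, not recovered from the original):

[Service]
# Clear the packaged ExecStart, then start dockerd with the new root directory
ExecStart=
ExecStart=/usr/bin/dockerd --graph=/data/docker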
Start Docker
systemctl daemon-reload && \
systemctl start docker && \
systemctl -l status docker
Check the Docker service logs
journalctl -f -u docker
journalctl -xe
systemctl stop docker
systemctl restart docker
netstat -ntlp
systemctl enable docker
Set kernel parameters to enable forwarding
cat > /etc/sysctl.d/k8s.conf << EOF
(heredoc body lost in the original post)
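The usual k8s.conf for this step looks like the following; these are the standard values, not recovered from the original:

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf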
Note: both etcd and Kubernetes need certificates generated (the real etcd and Kubernetes certificates are configured separately below).
etcd certificates are stored in /etc/etcd/ssl/; Kubernetes certificates in /etc/kubernetes/ssl.
Create a temporary directory for the certificates (generate everything etcd and Kubernetes need on one server first, then distribute to the other servers):
mkdir -pv /data/ssl && \
cd /data/ssl/
Install cfssl (the certificate-generation tool)
curl -s -L -o /usr/bin/cfssl <URL stripped from the original post>
curl -s -L -o /usr/bin/cfssljson <URL stripped from the original post>
curl -s -L -o /usr/bin/cfssl-certinfo <URL stripped from the original post>
chmod +x /usr/bin/cfssl*
Generate the CA certificate and private key
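The stripped download URLs are presumably the standard cfssl R1.2 release links:

curl -s -L -o /usr/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
curl -s -L -o /usr/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
curl -s -L -o /usr/bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64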
The config.json file:
cat >> config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
The etcd-csr.json file:
cat >> etcd-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "10.10.175.3",
    "10.10.188.125",
    "10.10.121.199"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
The csr.json file (the CA CSR):
cat >> csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
Note: adjust the hosts list in etcd-csr.json to match your own environment.
Copy the three files above into /data/ssl, then:
cd /data/ssl/
cfssl gencert -initca csr.json | cfssljson -bare ca
Create the etcd certificate config; generate the etcd certificate and private key
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=config.json \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare etcd

[root@k8smaster01 ~]# ll /data/ssl
-rw-r--r-- 1 root root 1005 Dec 21 13:56 ca.csr
-rw------- 1 root root 1679 Dec 21 13:56 ca-key.pem
-rw-r--r-- 1 root root 1363 Dec 21 13:56 ca.pem
-rw-r--r-- 1 root root  385 Dec 21 13:56 config.json
-rw-r--r-- 1 root root  265 Dec 21 13:56 csr.json
-rw-r--r-- 1 root root 1066 Dec 21 13:56 etcd.csr
-rw-r--r-- 1 root root  375 Dec 21 13:56 etcd-csr.json
-rw------- 1 root root 1675 Dec 21 13:56 etcd-key.pem
-rw-r--r-- 1 root root 1440 Dec 21 13:56 etcd.pem
The etcd certificate files are now generated.
Create the kube-apiserver certificate config; generate the kube-apiserver certificate and private key
First, copy all of the files below into /data/ssl.
The kube-admin-csr.json file:
cat >> kube-admin-csr.json << EOF
{
  "CN": "kube-admin",
  "hosts": [
    "10.10.175.3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
The kube-apiserver-csr.json file:
cat >> kube-apiserver-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.10.175.3",
    "10.10.188.125",
    "10.10.121.199",
    "10.254.0.1",
    "localhost",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
The kube-controller-manager-csr.json file:
cat >> kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "hosts": [
    "10.10.175.3",
    "10.10.188.125"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}
EOF
The front-proxy-client-csr.json file:
cat >> front-proxy-client-csr.json << EOF
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  }
}
EOF
The kube-proxy-csr.json file:
cat >> kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-proxy",
      "OU": "System"
    }
  ]
}
EOF
The kube-scheduler-csr.json file:
cat >> kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "10.10.175.3",
    "10.10.188.125"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}
EOF
The kubelet-csr.json file:
cat >> kubelet-csr.json << EOF
{
  "CN": "system:node:master01",
  "hosts": [
    "master01",
    "10.10.175.3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "ST": "Shanghai",
      "O": "system:nodes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
Note: the hosts entries in these five files (kube-admin-csr.json, kube-apiserver-csr.json, kube-controller-manager-csr.json, kubelet-csr.json, and kube-scheduler-csr.json) must be changed to match your own environment.
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=config.json \
-profile=kubernetes \
kube-apiserver-csr.json | cfssljson -bare kube-apiserver

Create the kube-controller-manager certificate config; generate the kube-controller-manager certificate and private key
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=config.json \
-profile=kubernetes \
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

Create the kube-scheduler certificate config; generate the kube-scheduler certificate and private key
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=config.json \
-profile=kubernetes \
kube-scheduler-csr.json | cfssljson -bare kube-scheduler

Create the kube-admin certificate config; generate the kube-admin certificate and private key
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=config.json \
-profile=kubernetes \
kube-admin-csr.json | cfssljson -bare kube-admin

Create the kube-proxy certificate config; generate the kube-proxy certificate and private key
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare kube-proxy
Generate the advanced audit configuration
cat > audit-policy.yaml << EOF
(heredoc body lost in the original post)
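A minimal audit policy that logs every request at the Metadata level; this is a common placeholder for Kubernetes 1.8, not the policy recovered from the original:

cat > audit-policy.yaml << EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
- level: Metadata
EOF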
Generate token.csv
cd /data/ssl/
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /data/ssl/token.csv << EOF
(heredoc body lost in the original post)
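The conventional token.csv layout for TLS bootstrapping is token,user,uid,"group"; assuming that format, the file body would be:

cat > /data/ssl/token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF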
Review the generated certificate files
kubelet certificates could also be signed manually by the CA, but that only works for a handful of machines: each signing must embed the node's IP, and adding IPs becomes painful as nodes multiply. So here we use TLS bootstrapping and let the apiserver automatically sign certificates for qualifying nodes so they can join the cluster. On first start, kubelet sends a TLS bootstrapping request to kube-apiserver; kube-apiserver checks whether the token in the request matches its configured token and, if so, automatically generates a certificate and key for the kubelet. Master and node machines alike need both the etcd and Kubernetes certificates.
Before distributing the certificates, create these two directories on the other nodes:
mkdir -pv /etc/etcd/ssl && mkdir -pv /etc/kubernetes/ssl
Distribute the certificates:
cd /data/ssl/
rsync -avzP ca*.pem etcd*.pem root@10.10.188.125:/etc/etcd/ssl/
rsync -avzP ca*.pem kube-apiserver*.pem kube-controller-manager*.pem kube-scheduler*.pem kube-proxy*.pem root@10.10.188.125:/etc/kubernetes/ssl/
rsync -avzP bootstrap.kubeconfig kube-proxy.kubeconfig audit-policy.yaml token.csv root@10.10.188.125:/etc/kubernetes/
An etcd cluster needs at least three servers.
Download URL: (stripped from the original post)
cd /data/tools/kubernetes/
tar -zxvf etcd-v3.2.11-linux-amd64.tar.gz
cd etcd-v3.2.11-linux-amd64/ && \
cp etcd etcdctl /usr/bin/ && \
etcd --version
The etcd configuration file (change the IPs to the current server's)
The ETCD_INITIAL_CLUSTER_STATE parameter: use new when bootstrapping the cluster; use existing when joining an existing cluster. ETCD_NAME must match the corresponding entry in ETCD_INITIAL_CLUSTER.
cat > /etc/etcd/etcd.conf << EOF
(heredoc body lost in the original post)
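A sketch of etcd.conf for the first node (10.10.175.3), using the parameters named above plus this guide's TLS paths; the member names etcd01..etcd03 and the data directory are assumptions:

cat > /etc/etcd/etcd.conf << EOF
ETCD_NAME=etcd01
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_LISTEN_PEER_URLS="https://10.10.175.3:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.10.175.3:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.10.175.3:2380"
ETCD_INITIAL_CLUSTER="etcd01=https://10.10.175.3:2380,etcd02=https://10.10.188.125:2380,etcd03=https://10.10.121.199:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://10.10.175.3:2379"
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
EOF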
The etcd systemd unit
cat > /usr/lib/systemd/system/etcd.service << EOF
(heredoc body lost in the original post)
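A sketch of the unit, assuming etcd picks up its settings from the ETCD_* variables in /etc/etcd/etcd.conf (the etcd binary honors these as environment variables):

cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload && systemctl start etcd && systemctl enable etcd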
Check cluster health (run the command below on any member of the etcd cluster)
etcdctl --endpoints=https://10.10.175.3:2379 --cert-file=/etc/etcd/ssl/etcd.pem --ca-file=/etc/etcd/ssl/ca.pem --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health
List the etcd cluster members:
etcdctl --endpoints=https://10.10.175.3:2379 --cert-file=/etc/etcd/ssl/etcd.pem --ca-file=/etc/etcd/ssl/ca.pem --key-file=/etc/etcd/ssl/etcd-key.pem member list
Inspect what the etcd cluster stores (ls can be followed by a specific directory):
etcdctl --endpoints=https://127.0.0.1:2379 --cert-file=/etc/etcd/ssl/etcd.pem --ca-file=/etc/etcd/ssl/ca.pem --key-file=/etc/etcd/ssl/etcd-key.pem ls
1. Installing the master node
Download URL: (stripped from the original post)
# Copy the binaries (the kubernetes-server tarball contains all of the server, client, and node binaries)
cd /data/tools/kubernetes/v1.8.5/
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler /usr/bin/
cp kubelet kube-proxy /usr/bin/
cp kubectl /usr/bin/
Set this to the current master node's IP:
export KUBE_APISERVER="https://10.10.175.3:6443"
Create the kubelet bootstrapping kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
cp ./bootstrap.kubeconfig /etc/kubernetes/
Create the kube-proxy kubeconfig
cd /data/ssl/
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
cp ./kube-proxy.kubeconfig /etc/kubernetes/
Create the kube-admin kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=admin.conf

kubectl config set-credentials kube-admin \
--client-certificate=kube-admin.pem \
--embed-certs=true \
--client-key=kube-admin-key.pem \
--kubeconfig=admin.conf

kubectl config set-context kube-admin@kubernetes \
--cluster=kubernetes \
--user=kube-admin \
--kubeconfig=admin.conf

kubectl config use-context kube-admin@kubernetes --kubeconfig=admin.conf
cp ./admin.conf /etc/kubernetes/
cp /etc/kubernetes/admin.conf ~/.kube/config
Configure the shared config file
cd /etc/kubernetes/
cat > /etc/kubernetes/config << EOF
(heredoc body lost in the original post)
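A sketch of the shared file that the kube-* configs below build on, following the standard CentOS /etc/kubernetes layout; the values are assumptions:

cat > /etc/kubernetes/config << EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://127.0.0.1:8080"
EOF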
Configure the apiserver file
cd /etc/kubernetes/
mkdir -pv /data/kubernetes/logs
cat > /etc/kubernetes/apiserver << EOF
(heredoc body lost in the original post)
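A sketch consistent with the note below (secure port on the node IP, insecure port on 127.0.0.1, etcd over TLS, the bootstrap token and audit policy wired in); every flag and value the surrounding text does not name is an assumption:

cat > /etc/kubernetes/apiserver << EOF
KUBE_API_ADDRESS="--advertise-address=10.10.175.3 --bind-address=10.10.175.3 --insecure-bind-address=127.0.0.1"
KUBE_ETCD_SERVERS="--etcd-servers=https://10.10.175.3:2379,https://10.10.188.125:2379,https://10.10.121.199:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction"
KUBE_API_ARGS="--authorization-mode=RBAC,Node \\
  --kubelet-https=true \\
  --enable-bootstrap-token-auth \\
  --token-auth-file=/etc/kubernetes/token.csv \\
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \\
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \\
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --etcd-cafile=/etc/etcd/ssl/ca.pem \\
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
  --audit-log-path=/data/kubernetes/logs/audit.log"
EOF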
Note: the secure port listens on 10.10.175.3 (the current server's IP) and serves the node machines. The insecure port listens on 127.0.0.1 and serves only the kube-controller-manager and kube-scheduler on the same machine, which keeps things both secure and stable (the IP can be changed to 0.0.0.0).
Configure the kube-apiserver systemd unit
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
(heredoc body lost in the original post)
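A sketch of the unit, assuming the usual EnvironmentFile pattern for this layout; the kube-controller-manager, kube-scheduler, kubelet, and kube-proxy units below follow the same shape, swapping in their binary and environment file:

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
After=network.target etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver \\
  \$KUBE_LOGTOSTDERR \\
  \$KUBE_LOG_LEVEL \\
  \$KUBE_ETCD_SERVERS \\
  \$KUBE_API_ADDRESS \\
  \$KUBE_ALLOW_PRIV \\
  \$KUBE_SERVICE_ADDRESSES \\
  \$KUBE_ADMISSION_CONTROL \\
  \$KUBE_API_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF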
Start kube-apiserver
systemctl daemon-reload && \
systemctl start kube-apiserver && \
systemctl -l status kube-apiserver

journalctl -f -u kube-apiserver
journalctl -xe
systemctl stop kube-apiserver
netstat -ntlp
systemctl enable kube-apiserver
Check the current master cluster status
kubectl get cs
Configure the controller-manager file
cd /etc/kubernetes/
cat > /etc/kubernetes/controller-manager << EOF
(heredoc body lost in the original post)
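A sketch that reuses the cluster CA generated earlier so the controller-manager can sign the TLS-bootstrapped kubelet certificates; the flag set is an assumption:

cat > /etc/kubernetes/controller-manager << EOF
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 \\
  --service-cluster-ip-range=10.254.0.0/16 \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --leader-elect=true"
EOF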
Configure the kube-controller-manager systemd unit (same pattern as the kube-apiserver unit, with EnvironmentFile=-/etc/kubernetes/controller-manager)
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
(heredoc body lost in the original post)
Start the kube-controller-manager service
systemctl daemon-reload && \
systemctl start kube-controller-manager && \
systemctl -l status kube-controller-manager

journalctl -f -u kube-controller-manager
journalctl -xe
systemctl stop kube-controller-manager
netstat -ntlp
systemctl enable kube-controller-manager
Check the current master cluster status
kubectl get cs
Configure the scheduler file
cd /etc/kubernetes/
cat > /etc/kubernetes/scheduler << EOF
(heredoc body lost in the original post)
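The scheduler needs very little; a sketch (flags assumed):

cat > /etc/kubernetes/scheduler << EOF
KUBE_SCHEDULER_ARGS="--address=127.0.0.1 --leader-elect=true"
EOF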
Configure the kube-scheduler systemd unit (again the same pattern, with EnvironmentFile=-/etc/kubernetes/scheduler)
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
(heredoc body lost in the original post)
Start the kube-scheduler service
systemctl daemon-reload && \
systemctl start kube-scheduler && \
systemctl -l status kube-scheduler

journalctl -f -u kube-scheduler
journalctl -xe
systemctl stop kube-scheduler
netstat -ntlp
systemctl enable kube-scheduler
Check the current master cluster status
kubectl get cs
Enable the services at boot
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
2. Using the master as a node
Configure the kubelet file (the values highlighted in the original, lost in this plain-text copy, must be adjusted to the current server)
mkdir -pv /data/kubernetes/kubelet
cd /etc/kubernetes/
cat > /etc/kubernetes/kubelet << EOF
(heredoc body lost in the original post)
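The same file appears in full in the Calico section near the end; at this stage it should be identical except that --network-plugin=cni is not yet present. A sketch, using the Aliyun pause image the next paragraph mentions:

cat > /etc/kubernetes/kubelet << EOF
KUBELET_ADDRESS="--address=10.10.175.3"
KUBELET_HOSTNAME="--hostname-override=k8smaster01.test.com"
KUBELET_ARGS="--cgroup-driver=cgroupfs \\
  --cluster-dns=10.254.0.2 \\
  --resolv-conf=/etc/resolv.conf \\
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
  --fail-swap-on=false \\
  --cert-dir=/etc/kubernetes/ssl \\
  --cluster-domain=cluster.local. \\
  --hairpin-mode=promiscuous-bridge \\
  --serialize-image-pulls=false \\
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF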
Starting kubelet pulls the pause-amd64:3.0 image. Either use the Aliyun registry as configured above, or download the image from gcr.io and load it as shown below (reaching gcr.io requires a proxy).
cd /data/tools/kubernetes/images/
docker images
docker load < gcr.io_google_containers_pause-amd64_3.0.tar
docker pull gcr.io/google_containers/pause-amd64:3.0
docker pull registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
Leave the network-plugin parameter out for now; add it later when setting up the network component:
--network-plugin=cni \
gcr.io/google_containers may not be reachable; switch to a domestic mirror for:
--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0
Configure the kubelet systemd unit
cat > /usr/lib/systemd/system/kubelet.service << EOF
(heredoc body lost in the original post; the unit follows the same EnvironmentFile pattern as the units above)
Create the kubelet data directory
mkdir -pv /data/kubernetes/kubelet
Start the kubelet service
systemctl daemon-reload && \
systemctl start kubelet && \
systemctl -l status kubelet

journalctl -f -u kubelet
journalctl -xe
systemctl stop kubelet
systemctl restart kubelet
netstat -ntlp
systemctl enable kubelet
Authorize the Kubernetes node
On the master: because we use TLS bootstrapping, a ClusterRoleBinding must be created first. Run this on any one master:
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
This must be run after the apiserver is up and listening on port 8080; otherwise you get:
The connection to the server localhost:8080 was refused - did you specify the right host or port?
If it has already been run on one master, running it again on another host reports that the binding already exists:
Error from server (AlreadyExists): clusterrolebindings.rbac.authorization.k8s.io "kubelet-bootstrap" already exists
Check the node certificate requests
On the master a quick check shows the node's CSR in the Pending state:
kubectl get csr
NAME                                                   AGE  REQUESTOR          CONDITION
node-csr-5apDfsKujoNM61vlHpg1o3gboEYI5xaXsB54uniZLS8   7m   kubelet-bootstrap  Pending
Use kubectl to approve the node's request to join the cluster
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
kubectl get csr | awk '/Pending/ {print $1}' | xargs kubectl certificate approve
Configure the kube-proxy file (adjust to the current server)
kube-proxy is the key component that implements Services: it runs on every node, watches the API server for changes to Service and Endpoint objects, and applies iptables rules to implement the forwarding.
cd /etc/kubernetes/
cat > /etc/kubernetes/proxy << EOF
(heredoc body lost in the original post)
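A sketch of the proxy file; the flag set is an assumption:

cat > /etc/kubernetes/proxy << EOF
KUBE_PROXY_ARGS="--bind-address=10.10.175.3 \\
  --hostname-override=k8smaster01.test.com \\
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
  --cluster-cidr=10.254.0.0/16"
EOF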
Configure the kube-proxy systemd unit (same pattern as the units above, with EnvironmentFile=-/etc/kubernetes/proxy)
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
(heredoc body lost in the original post)
Start the kube-proxy service
systemctl daemon-reload && \
systemctl start kube-proxy && \
systemctl -l status kube-proxy

journalctl -f -u kube-proxy
journalctl -xe
systemctl stop kube-proxy
netstat -ntlp
systemctl enable kube-proxy

systemctl daemon-reload && \
systemctl stop kube-proxy && \
systemctl start kube-proxy
When the masters double as nodes, every master needs these configuration files. When setting up the other masters, copy the configs over from this server (adjusting them as appropriate), then install the unit files and start the services.
3. Configuring the worker nodes
Note: make sure the etcd and Kubernetes certificates are already in their directories and the Docker service is installed. If you use the kubernetes-server package, install the binaries as below; if you downloaded kubernetes-node instead, copy kubernetes/node/bin/ into /usr/bin/.
cd /data/tools/kubernetes/v1.8.5/
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kubelet kube-proxy kubectl /usr/bin/
Node configuration files
A node runs the kube-proxy and kubelet services; installation and configuration follow the "Master as Node" section. From a master's /etc/kubernetes, copy audit-policy.yaml, bootstrap.kubeconfig, config, kubelet, kubelet.kubeconfig, kube-proxy.kubeconfig, proxy, and token.csv into the node's /etc/kubernetes, then:
bootstrap.kubeconfig: change server to the local apiserver proxy address (URL stripped from the original post)
config: comment out the KUBE_MASTER line
kubelet.kubeconfig: change server (URL stripped from the original post)
kube-proxy.kubeconfig: change server (URL stripped from the original post)
Adjust the remaining settings to the current server.
Create the Nginx proxy
The HA scheme is based on an Nginx reverse proxy, so every node runs an Nginx instance that load-balances across the masters; see the HA Master overview for details. Create the config directory:
mkdir -pv /etc/nginx
Write the proxy configuration:
cat > /etc/nginx/nginx.conf << EOF
error_log stderr notice;
worker_processes auto;
events {
  multi_accept on;
  use epoll;
  worker_connections 1024;
}
stream {
  upstream kube_apiserver {
    least_conn;
    server 10.10.175.3:6443;
    server 10.10.188.125:6443;
    server 10.10.121.199:6443;
  }
  server {
    listen 0.0.0.0:6443;
    proxy_pass kube_apiserver;
    proxy_timeout 10m;
    proxy_connect_timeout 1s;
  }
}
EOF
Update the permissions
chmod +r /etc/nginx/nginx.conf
Configure the nginx startup unit (nginx runs in Docker; the nginx:1.13.5-alpine image is pulled automatically)
cat > /etc/systemd/system/nginx-proxy.service << EOF
[Unit]
Description=kubernetes apiserver docker wrapper
Wants=docker.socket
After=docker.service

[Service]
User=root
PermissionsStartOnly=true
ExecStart=/usr/bin/docker run -p 127.0.0.1:6443:6443 \\
  -v /etc/nginx:/etc/nginx \\
  --name nginx-proxy \\
  --net=host \\
  --restart=on-failure:5 \\
  --memory=512M \\
  nginx:1.13.5-alpine
ExecStartPre=-/usr/bin/docker rm -f nginx-proxy
ExecStop=/usr/bin/docker stop nginx-proxy
Restart=always
RestartSec=15s
TimeoutStartSec=30s

[Install]
WantedBy=multi-user.target
EOF
vim /etc/systemd/system/nginx-proxy.service
Start the Nginx proxy service
systemctl daemon-reload && \
systemctl start nginx-proxy && \
systemctl -l status nginx-proxy

journalctl -f -u nginx-proxy
journalctl -xe
netstat -ntlp
systemctl enable nginx-proxy
Load the Calico Docker images
docker load < quay.io_calico_cni_v1.11.0.tar
docker load < quay.io_calico_kube-controllers_v1.0.0.tar
docker load < quay.io_calico_node_v2.6.1.tar
Create the Calico directories
mkdir -pv /etc/calico && \
mkdir -pv /data/kubernetes/calico/
cd /etc/calico/
Modify the Calico configuration
Calico is deployed in a "hybrid" fashion: systemd controls calico-node, while the CNI plugin and the rest are installed by a Kubernetes DaemonSet. See the Calico deployment pitfalls write-up for background; the commands follow. Fetch calico.yaml:
wget -c "https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml"
Substitute the etcd endpoints
sed -i 's@.*etcd_endpoints:.*@\ \ etcd_endpoints:\ \"https://10.10.175.3:2379,https://10.10.188.125:2379,https://10.10.121.199:2379\"@gi' calico.yaml
Substitute the etcd certificates
export ETCD_CERT=`cat /etc/etcd/ssl/etcd.pem | base64 | tr -d '\n'`
export ETCD_KEY=`cat /etc/etcd/ssl/etcd-key.pem | base64 | tr -d '\n'`
export ETCD_CA=`cat /etc/etcd/ssl/ca.pem | base64 | tr -d '\n'`

sed -i "s@.*etcd-cert:.*@\ \ etcd-cert:\ ${ETCD_CERT}@gi" calico.yaml
sed -i "s@.*etcd-key:.*@\ \ etcd-key:\ ${ETCD_KEY}@gi" calico.yaml
sed -i "s@.*etcd-ca:.*@\ \ etcd-ca:\ ${ETCD_CA}@gi" calico.yaml

sed -i 's@.*etcd_ca:.*@\ \ etcd_ca:\ "/calico-secrets/etcd-ca"@gi' calico.yaml
sed -i 's@.*etcd_cert:.*@\ \ etcd_cert:\ "/calico-secrets/etcd-cert"@gi' calico.yaml
sed -i 's@.*etcd_key:.*@\ \ etcd_key:\ "/calico-secrets/etcd-key"@gi' calico.yaml
Comment out the calico-node section (it will be managed by systemd)
sed -i '103,197s@.*@#&@gi' calico.yaml
Create the Calico DaemonSet
cd /etc/calico/
First create the RBAC:
wget -c "" (URL stripped from the original post)
kubectl apply -f rbac.yaml
Then create the Calico DaemonSet:
kubectl create -f calico.yaml
To delete the Calico DaemonSet:
kubectl delete -f calico.yaml
Create the systemd unit
The previous step commented out the Calico Node parts of calico.yaml; to avoid problems with automatic IP detection, calico-node is moved into systemd. The service config follows. Install the calico-node service on every node, adjusting the IP and hostname per node (as for why the backslashes are doubled, try it yourself and you'll see).
cat > /usr/lib/systemd/system/calico-node.service << EOF
(heredoc body lost in the original post)
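A sketch of the unit, assuming the usual docker-run wrapper for calico/node v2.6.1 with this guide's etcd endpoints and TLS paths; the environment variables are standard calico-node settings, and the IP/hostname values must be changed per node:

cat > /usr/lib/systemd/system/calico-node.service << EOF
[Unit]
Description=calico node
After=docker.service
Requires=docker.service

[Service]
User=root
PermissionsStartOnly=true
ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \\
  -e ETCD_ENDPOINTS=https://10.10.175.3:2379,https://10.10.188.125:2379,https://10.10.121.199:2379 \\
  -e ETCD_CA_CERT_FILE=/etc/etcd/ssl/ca.pem \\
  -e ETCD_CERT_FILE=/etc/etcd/ssl/etcd.pem \\
  -e ETCD_KEY_FILE=/etc/etcd/ssl/etcd-key.pem \\
  -e NODENAME=k8smaster01.test.com \\
  -e IP=10.10.175.3 \\
  -e CALICO_NETWORKING_BACKEND=bird \\
  -v /etc/etcd/ssl:/etc/etcd/ssl \\
  -v /var/run/calico:/var/run/calico \\
  -v /lib/modules:/lib/modules \\
  -v /var/log/calico:/var/log/calico \\
  quay.io/calico/node:v2.6.1
ExecStop=/usr/bin/docker rm -f calico-node
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF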
Modify the kubelet configuration
Per the official docs, the kubelet config must now include the --network-plugin=cni option, so edit the config:
vim /etc/kubernetes/kubelet

###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=10.10.175.3"

# The port for the info server to serve on
# KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=k8smaster01.test.com"

# location of the api-server
# KUBELET_API_SERVER=""

# Add your own!
KUBELET_ARGS="--cgroup-driver=cgroupfs \
  --network-plugin=cni \
  --cluster-dns=10.254.0.2 \
  --resolv-conf=/etc/resolv.conf \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --fail-swap-on=false \
  --cert-dir=/etc/kubernetes/ssl \
  --cluster-domain=cluster.local. \
  --hairpin-mode=promiscuous-bridge \
  --serialize-image-pulls=false \
  --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"
Restart kubelet
systemctl daemon-reload && \
systemctl stop kubelet && \
systemctl start kubelet
Start the calico-node service
systemctl enable calico-node
systemctl start calico-node
systemctl -l status calico-node

journalctl -f -u calico-node
journalctl -xe
systemctl stop calico-node
netstat -ntlp
Test cross-host communication
Create a deployment:
cat >> /data/kubernetes/calico/demo.deploy.yml << EOF
(heredoc body lost in the original post)
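A sketch of a demo deployment matching the demo-deployment pod names seen below; the image choice is an assumption (anything that answers ping/curl works):

cat >> /data/kubernetes/calico/demo.deploy.yml << EOF
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: demo-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: demo
    spec:
      containers:
      - name: demo
        image: nginx:1.13.5-alpine
        ports:
        - containerPort: 80
EOF
kubectl create -f /data/kubernetes/calico/demo.deploy.yml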
Install calicoctl
Download calicoctl and distribute it to each node:
cd /data/tools/kubernetes/
wget -c https://github.com/projectcalico/calicoctl/releases/download/v1.6.1/calicoctl
chmod +x calicoctl
cp calicoctl /usr/bin/
rsync -avzP calicoctl root@10.10.188.125:/usr/local/bin/
rsync -avzP calicoctl root@10.10.121.199:/usr/local/bin/
Check Calico's status on a node
calicoctl node status
Check the result
kubectl get pod
kubectl get pod -o wide
Verify calico
kubectl get pods -n kube-system
kubectl get deployment
kubectl get svc
kubectl get pod -o wide -n kube-system
kubectl get svc,po -o wide --all-namespaces
Exec into one Pod and ping or curl another Pod's IP to test:
kubectl exec -it demo-deployment-5fc9c54fb4-gnh8b bash
ping 10.254.x.x
curl 10.254.x.x
Check whether the pending pods have started:
kubectl -n kube-system get po
DNS only needs to be installed on one of the master servers.
Load the kube-dns Docker images:
docker pull foxchan/k8s-dns-kube-dns-amd64:1.14.7
docker pull foxchan/k8s-dns-dnsmasq-nanny-amd64:1.14.7
docker pull foxchan/k8s-dns-sidecar-amd64:1.14.7
docker pull registry.cn-hangzhou.aliyuncs.com/linkcloud/cluster-proportional-autoscaler-amd64:1.1.2
Fetch the corresponding yaml file
cd /etc/kubernetes/
wget -c (URL stripped from the original post)
mv kube-dns.yaml.sed kube-dns.yaml
Modify the configuration
sed -i 's/$DNS_DOMAIN/cluster.local/gi' kube-dns.yaml
sed -i 's/$DNS_SERVER_IP/10.254.0.2/gi' kube-dns.yaml
vim /etc/kubernetes/kube-dns.yaml
...snip...
        - --domain=cluster.local.
        - --kube-master-url=http://10.10.175.3:8080
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
...snip...
Change the image paths
:%s#gcr.io/google_containers#foxchan#g
Create it
kubectl create -f kube-dns.yaml
# kubectl delete -f kube-dns.yaml
Deploy DNS horizontal autoscaling
wget (URL stripped from the original post)
Change the image path in dns-horizontal-autoscaler.yaml to registry.cn-hangzhou.aliyuncs.com/linkcloud/cluster-proportional-autoscaler-amd64:1.1.2
kubectl create -f dns-horizontal-autoscaler.yaml
# kubectl delete -f dns-horizontal-autoscaler.yaml
Check the result
kubectl get pods -o wide -n kube-system
Load the kubernetes-dashboard image
Aliyun image registry: (URL stripped from the original post)
docker load < kubernetes-dashboard-amd64.v1.8.0.tar
Dashboard is the official dashboard developed by the Kubernetes community. With it, administrators can manage the Kubernetes cluster through a web UI; besides being convenient, it visualizes resources and presents system information more directly.
First we create kubernetes-dashboard-certs for the Dashboard's TLS:
cd /etc/kubernetes/
wget -c (URL stripped from the original post)
vim kubernetes-dashboard.yaml
Around line 126:
...snip...
        - --apiserver-host=http://10.10.175.3:8080
...snip...
Create kubernetes-dashboard
kubectl create -f kubernetes-dashboard.yaml
# kubectl delete -f kubernetes-dashboard.yaml
Check the result:
kubectl get pods -n kube-system -o wide
kubectl get svc -n kube-system
Check the kubernetes-dashboard service:
kubectl get svc,po -o wide --all-namespaces
kubectl get pods -n kube-system | grep dashboard
Here we use token authentication. Where does the token come from? We create a kubernetes-dashboard-rbac.yaml with the following content:
cd /etc/kubernetes/
cat > kubernetes-dashboard-rbac.yaml << EOF
(heredoc body lost in the original post)
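A sketch of the binding, consistent with the notes below (the clusterrolebinding is named dashboard-admin and the serviceaccount is the default one in kube-system); binding to cluster-admin is an assumption:

cat > kubernetes-dashboard-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
subjects:
- kind: ServiceAccount
  name: default
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
EOF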
After creating it, we fetch its token value.
The serviceaccount here is the default one in kube-system, so we can simply look at the default secret in kube-system. Run:
kubectl -n kube-system get secret
Find the default-token-XXXX entry, then create the binding:
kubectl create -f kubernetes-dashboard-rbac.yaml
clusterrolebinding "dashboard-admin" created
# kubectl delete -f kubernetes-dashboard-rbac.yaml
Finally, with the default-token-XXXX entry found, run:
kubectl describe secret default-token-XXXX -n kube-system
kubectl describe secret default-token-w5htr -n kube-system
That prints the token.
kubectl -n kube-system get po,svc -l k8s-app=kubernetes-dashboard
NAME                                      READY  STATUS   RESTARTS  AGE
po/kubernetes-dashboard-766666b68c-q2l28  1/1    Running  0         23h
NAME                      TYPE       CLUSTER-IP   EXTERNAL-IP  PORT(S)  AGE
svc/kubernetes-dashboard  ClusterIP  10.254.8.65  <none>       443/TCP  23h
Once done, the Dashboard can be accessed from a browser.
Access the kubernetes dashboard (URL stripped from the original post)
View a container's docker logs
docker logs <CONTAINER ID>
docker logs 546a1f7e2153

Reposted from: https://blog.51cto.com/irow10/2055064