Installing k8s 1.24.12 with ansible (kubeasz)

Environment

Role          IP               Components
K8s-master1   192.168.10.10    kube-scheduler, kubelet, kube-proxy, docker, keepalived, etcd
K8s-master2   192.168.10.11    kube-scheduler, kubelet, kube-proxy, docker, keepalived, etcd
K8s-node1     192.168.10.12    kubelet, kube-proxy, docker
ansible       192.168.10.100   ansible

I. Configure keepalived for high-availability load balancing

1. Install keepalived and haproxy (on both masters)
yum -y install ipvsadm keepalived haproxy
2. Configure keepalived on master1
[root@k8s-master1 ~]# vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 88
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.88
    }
}
3. Configure keepalived on master2
[root@k8s-master2 ~]# vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 88
    priority 80
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.88
    }
}

4. Restart keepalived and enable it at boot (on both masters)
systemctl restart keepalived
systemctl enable keepalived
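
Once keepalived is running, the VIP should be bound on master1 and move to master2 only if master1's keepalived stops. A quick check (assuming eth0 is the interface configured above):

ip addr show eth0 | grep 192.168.10.88    # on master1 this prints the VIP; on master2 it prints nothing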

II. Configure haproxy

1. Configure haproxy on master1
[root@k8s-master1 ~]# vim /etc/haproxy/haproxy.cfg
listen k8s_6443
    mode tcp
    bind 192.168.10.88:6443
    server 192.168.10.10 192.168.10.10:6443 check inter 5s rise 2 fall 3
    server 192.168.10.11 192.168.10.11:6443 check inter 5s rise 2 fall 3
2. Configure haproxy on master2
[root@k8s-master2 ~]# vim /etc/haproxy/haproxy.cfg
listen k8s_6443
    mode tcp
    bind 192.168.10.88:6443
    server 192.168.10.10 192.168.10.10:6443 check inter 5s rise 2 fall 3
    server 192.168.10.11 192.168.10.11:6443 check inter 5s rise 2 fall 3
3. Restart haproxy and enable it at boot (on both masters)
systemctl restart haproxy
systemctl enable haproxy
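
Because haproxy binds the VIP directly, it can fail to start on whichever node does not currently hold 192.168.10.88 unless the kernel allows binding non-local addresses. A commonly used sysctl plus a listener check (an assumption about a typical setup, not part of the original steps):

echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
sysctl -p
ss -lntp | grep 6443    # haproxy should be listening on 192.168.10.88:6443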

III. Install k8s (run on k8s-master1, the deployment node)

1. Install ansible
yum -y install ansible
2. Configure passwordless SSH login to every node
ssh-keygen -t rsa
ssh-copy-id -i .ssh/id_rsa.pub root@192.168.10.10
ssh-copy-id -i .ssh/id_rsa.pub root@192.168.10.11
ssh-copy-id -i .ssh/id_rsa.pub root@192.168.10.12
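Before running any playbooks, it is worth confirming key-based login works; a simple loop such as the following (not part of the original steps) should print each hostname without prompting for a password:

for ip in 192.168.10.10 192.168.10.11 192.168.10.12; do ssh -o BatchMode=yes root@$ip hostname; done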
3. Install docker with yum
3.1 Install the dependency packages
[root@k8s-master1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
3.2 Add the docker-ce yum repository
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 
3.3 Install docker
[root@k8s-master1 ~]# yum -y install docker-ce docker-ce-cli containerd.io
3.4 Configure a registry mirror and the docker data directory
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://7jauxlsb.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.3.133"],
  "data-root": "/data/docker"
}
EOF
3.5 Restart docker
systemctl daemon-reload 
systemctl enable docker
systemctl restart docker
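To confirm the mirror and data directory took effect, docker info can be checked (a quick verification, not in the original write-up):

docker info | grep -A1 "Registry Mirrors"
docker info | grep "Docker Root Dir"    # should show /data/docker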
4. Deploy the cluster components with kubeasz
4.1 Download ezdown
wget https://github.com/easzlab/kubeasz/releases/download/3.3.5/ezdown
chmod +x ./ezdown
4.2 Set the versions in ezdown and download the components
[root@k8s-master1 ~]# vim ezdown
DOCKER_VER=20.10.22
KUBEASZ_VER=3.3.5
K8S_BIN_VER=v1.24.12
[root@k8s-master1 ~]# ./ezdown -D
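./ezdown -D downloads the kubeasz code, the k8s binaries, and the default container images onto the deployment node, placing everything under /etc/kubeasz. A quick sanity check (assuming the default layout):

ls /etc/kubeasz/bin/ | head
docker images | head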
4.3 Generate the cluster configuration and ansible hosts file (the cluster name given here must match the one used in the ./ezctl setup commands below; this walkthrough uses 1.24.12)
[root@k8s-master1 ~]# cd /etc/kubeasz/
[root@k8s-master1 kubeasz]# ./ezctl new 1.24.12
4.4 Edit the ansible hosts file (/etc/kubeasz/clusters/1.24.12/hosts)
[etcd]
192.168.10.10
192.168.10.11
192.168.10.12

[kube_master]
192.168.10.10 k8s_nodename='master01'
192.168.10.11 k8s_nodename='master02'

[kube_node]
192.168.10.12 k8s_nodename='node01'

[harbor]

[ex_lb]
192.168.10.11 LB_ROLE=backup EX_APISERVER_VIP=192.168.10.88 EX_APISERVER_PORT=8443
192.168.10.10 LB_ROLE=master EX_APISERVER_VIP=192.168.10.88 EX_APISERVER_PORT=8443

[chrony]

[all:vars]
SECURE_PORT="6443"
CONTAINER_RUNTIME="containerd"
CLUSTER_NETWORK="calico"
PROXY_MODE="ipvs"
SERVICE_CIDR="10.10.0.0/16"
CLUSTER_CIDR="172.10.0.0/16"
NODE_PORT_RANGE="1-65535"
CLUSTER_DNS_DOMAIN="cluster.local"
bin_dir="/usr/bin"
base_dir="/etc/kubeasz"
cluster_dir="{{ base_dir }}/clusters/1.24.12"
ca_dir="/etc/kubernetes/ssl"
k8s_nodename=''
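
Before running any setup stage, confirm ansible can reach every host in this inventory (a quick check, assuming the cluster directory created above):

cd /etc/kubeasz
ansible -i clusters/1.24.12/hosts all -m ping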

4.5 Edit the config.yml file (/etc/kubeasz/clusters/1.24.12/config.yml)
INSTALL_SOURCE: "online"

OS_HARDEN: false


CA_EXPIRY: "876000h"
CERT_EXPIRY: "438000h"

CHANGE_CA: false

CLUSTER_NAME: "cluster1"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"

K8S_VER: "1.24.12"

K8S_NODENAME: "{%- if k8s_nodename != '' -%} \
{{ k8s_nodename|replace('_', '-')|lower }} \
{%- else -%} \
{{ inventory_hostname }} \
{%- endif -%}"

ETCD_DATA_DIR: "/var/lib/etcd"
ETCD_WAL_DIR: ""


ENABLE_MIRROR_REGISTRY: true

SANDBOX_IMAGE: "192.168.3.133/tools/pause:3.9"

CONTAINERD_STORAGE_DIR: "/data/containerd"

DOCKER_STORAGE_DIR: "/data/docker"

ENABLE_REMOTE_API: false

INSECURE_REG: '["192.168.3.133"]'


MASTER_CERT_HOSTS:
- "10.1.1.1"
- "k8s.easzlab.io"

NODE_CIDR_LEN: 24


KUBELET_ROOT_DIR: "/var/lib/kubelet"

MAX_PODS: 300

KUBE_RESERVED_ENABLED: "no"

SYS_RESERVED_ENABLED: "no"


FLANNEL_BACKEND: "vxlan"
DIRECT_ROUTING: false

flannel_ver: "v0.19.2"

CALICO_IPV4POOL_IPIP: "Always"

IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"

CALICO_NETWORKING_BACKEND: "brid"

CALICO_RR_ENABLED: false

CALICO_RR_NODES: []

calico_ver: "v3.24.5"

calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"

cilium_ver: "1.12.4"
cilium_connectivity_check: true
cilium_hubble_enabled: false
cilium_hubble_ui_enabled: false

OVN_DB_NODE: "{{ groups['kube_master'][0] }}"

kube_ovn_ver: "v1.5.3"

OVERLAY_TYPE: "full"

FIREWALL_ENABLE: true

kube_router_ver: "v0.3.1"
busybox_ver: "1.28.4"


dns_install: "yes"
corednsVer: "1.9.3"
ENABLE_LOCAL_DNS_CACHE: true
dnsNodeCacheVer: "1.22.13"
LOCAL_DNS_CACHE: "10.10.0.2"

metricsserver_install: "yes"
metricsVer: "v0.5.2"

dashboard_install: "no"
dashboardVer: "v2.7.0"
dashboardMetricsScraperVer: "v1.0.8"

prom_install: "no"
prom_namespace: "monitor"
prom_chart_ver: "39.11.0"

nfs_provisioner_install: "no"
nfs_provisioner_namespace: "kube-system"
nfs_provisioner_ver: "v4.0.2"
nfs_storage_class: "managed-nfs-storage"
nfs_server: "192.168.1.10"
nfs_path: "/data/nfs"

network_check_enabled: false
network_check_schedule: "*/5 * * * *"

HARBOR_VER: "v2.6.3"
HARBOR_DOMAIN: "harbor.easzlab.io.local"
HARBOR_PATH: /var/data
HARBOR_TLS_PORT: 8443
HARBOR_REGISTRY: "{{ HARBOR_DOMAIN }}:{{ HARBOR_TLS_PORT }}"

HARBOR_SELF_SIGNED_CERT: true

HARBOR_WITH_NOTARY: false
HARBOR_WITH_TRIVY: false
HARBOR_WITH_CHARTMUSEUM: true
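
Because SANDBOX_IMAGE points at the private registry 192.168.3.133, the pause image has to exist there before the nodes come up. A sketch of pulling it from a public mirror and pushing it to Harbor (the source image and the tools project are assumptions; adjust to whatever registry you can reach):

docker pull registry.aliyuncs.com/google_containers/pause:3.9
docker tag registry.aliyuncs.com/google_containers/pause:3.9 192.168.3.133/tools/pause:3.9
docker login 192.168.3.133 -u admin
docker push 192.168.3.133/tools/pause:3.9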

4.6 Prepare the base environment (stage 01)

One-shot deployment is also possible with ./ezctl setup <cluster-name> all (e.g. ./ezctl setup 1.24.12 all); here each stage is run separately.

[root@k8s-master1 kubeasz]# ./ezctl setup 1.24.12 01
4.7 Deploy etcd (stage 02)
[root@k8s-master1 kubeasz]# ./ezctl setup 1.24.12 02
# Check etcd cluster health
etcdctl --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints="https://192.168.10.10:2379,https://192.168.10.11:2379,https://192.168.10.12:2379" endpoint health --write-out=table
4.8 Install containerd (stage 03)
[root@k8s-master1 kubeasz]# ./ezctl setup 1.24.12 03

After containerd is installed, change its registry mirror configuration (on every node running containerd):

[root@localhost ~]# vim /etc/containerd/config.toml
    [plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://8aj710su.mirror.aliyuncs.com", "https://registry-1.docker.io"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.3.133"]
          endpoint = ["http://192.168.3.133:80"]
      [plugins."io.containerd.grpc.v1.cri".registry.configs]
        [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.3.133".tls]
          insecure_skip_verify = true
        [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.3.133".auth]
          username = "admin"
          password = "123456"
[root@k8s-master1 ]# systemctl restart containerd
[root@k8s-master1 ]# systemctl enable containerd
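To verify containerd can now pull from the private registry, a test pull with crictl works (assuming crictl was set up by the containerd stage; the image is the SANDBOX_IMAGE from config.yml):

crictl pull 192.168.3.133/tools/pause:3.9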
4.9 Install the master nodes (stage 04)
[root@k8s-master1 kubeasz]# ./ezctl setup 1.24.12 04
4.10 Install the worker nodes (stage 05)
[root@k8s-master1 kubeasz]# ./ezctl setup 1.24.12 05
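
At this point all three nodes should have registered. A quick check from the deployment node (assuming kubectl and the kubeconfig were set up by the earlier stages, which kubeasz does by default):

kubectl get node -o wide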

5. Deploy the network plugin

Download the required images and push them to your own Harbor registry first.

5.1 Deploy calico (with the image addresses changed to the private registry)
[root@k8s-master1 kubeasz]# ./ezctl setup 1.24.12 06
6. Deploy coredns and metrics-server (with the image addresses changed)
[root@k8s-master1 kubeasz]# ./ezctl setup 1.24.12 07
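
Finally, check that calico, coredns, and metrics-server are all running (kubectl top may need a minute or two after metrics-server starts):

kubectl get pod -n kube-system -o wide
kubectl top node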