Single Master Cluster Deployment
- Minimum node requirement: 1 to 2 nodes.
- If the Master node fails, tools such as kubectl can no longer manage the cluster, but running containers are not affected.
- For streamlined clusters, a single-Master deployment is used when there are two microservice nodes.

This document deploys a Kubernetes cluster on the CentOS 7.9 / Debian 12 operating systems.
Server IP | Host Role |
---|---|
192.168.10.20 | Kubernetes 01 (Master, Node) |
192.168.10.21 | Kubernetes 02 (Node) |
Server Requirements
- No network policy restrictions between cluster servers
- Hostnames must not be duplicated among cluster servers
- Main network card MAC addresses must not be duplicated [check with ip link]
- product_uuid must not be duplicated [check with cat /sys/class/dmi/id/product_uuid]
- Port 6443 must not be in use [verify with nc -vz 127.0.0.1 6443]
- Disable swap memory [execute swapoff -a to disable it now and edit /etc/fstab to remove the swap partition mount]
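These requirements can be checked in one pass. The snippet below is a minimal sketch (assuming bash and the nc utility are available) that prints the values to compare across nodes and disables swap:

hostname                                        # must be unique per server
ip link | awk '/ether/ {print $2}' | sort -u    # interface MAC addresses, must be unique
cat /sys/class/dmi/id/product_uuid              # product_uuid, must be unique
nc -vz 127.0.0.1 6443                           # should fail (nothing listening) before installation
swapoff -a                                      # disable swap immediately
sed -ri 's/^([^#].*\sswap\s.*)/#\1/' /etc/fstab # comment out the swap mount so it stays off after reboot
free -m | awk '/^Swap/ {print $2}'              # should print 0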
Install CRI Container Runtime Environment
All nodes in the Kubernetes cluster need to perform these operations
- Download the Docker installation package

Server with Internet Access:

wget https://pdpublic.mingdao.com/private-deployment/offline/common/docker-27.3.1.tgz

Server without Internet Access:

# Link to download the Docker installation package; upload it to the target server after downloading
https://pdpublic.mingdao.com/private-deployment/offline/common/docker-27.3.1.tgz

- Install Docker

tar -zxvf docker-27.3.1.tgz
mv -f docker/* /usr/local/bin/

- Create directories for Docker and containerd configuration files

mkdir /etc/docker
mkdir /etc/containerd

- Create the daemon.json file for Docker

cat > /etc/docker/daemon.json <<\EOF
{
"registry-mirrors": ["https://uvlkeb6d.mirror.aliyuncs.com"],
"data-root": "/data/docker",
"max-concurrent-downloads": 10,
"exec-opts": ["native.cgroupdriver=cgroupfs"],
"storage-driver": "overlay2",
"default-address-pools":[{"base":"172.80.0.0/16","size":24}],
"insecure-registries": ["127.0.0.1:5000"]
}
EOF

- Create containerd's config.toml file
cat > /etc/containerd/config.toml <<\EOF
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/data/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/var/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "127.0.0.1:5000/pause:3.8"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/usr/local/kubernetes/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
EOF

- Configure the systemd file for Docker
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker
After=network-online.target
Wants=network-online.target
Requires=containerd.service
[Service]
Type=notify
ExecStart=/usr/local/bin/dockerd --containerd /var/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=1024000
LimitNPROC=infinity
LimitCORE=0
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF

- Configure the systemd file for containerd
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/local/bin/containerd --config /etc/containerd/config.toml
LimitNOFILE=1024000
LimitNPROC=infinity
LimitCORE=0
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF

- Start containerd and Docker and enable them to start on boot
systemctl daemon-reload && systemctl restart containerd && systemctl enable containerd
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
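Optionally, confirm that both services are active and that the settings from daemon.json were applied (this assumes the ctr binary from the Docker package was also copied to /usr/local/bin):

systemctl is-active containerd docker
docker info --format 'data-root: {{.DockerRootDir}}, cgroup driver: {{.CgroupDriver}}'
ctr --address /var/run/containerd/containerd.sock version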
Install CNI Plugins
This must be done on each node of the Kubernetes cluster
- Download the CNI plugin files

Server with Internet Access:

wget https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/cni-plugins-linux-amd64-v1.1.1.tgz

Server without Internet Access:

# CNI plugin package download link; download and upload it to the target server
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/cni-plugins-linux-amd64-v1.1.1.tgz

- Create the CNI file installation directory

mkdir -p /usr/local/kubernetes/cni/bin

- Extract the CNI plugins to the installation directory
tar -zxvf cni-plugins-linux-amd64-v1.1.1.tgz -C /usr/local/kubernetes/cni/bin
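As a quick check, the directory should now contain the standard plugin binaries (bridge, host-local, portmap, loopback, and so on):

ls /usr/local/kubernetes/cni/bin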
Commands Required for Installing K8S Cluster
Install crictl/kubeadm/kubelet/kubectl commands, which are needed on each node of the Kubernetes cluster.
- Create a directory for command installation

mkdir -p /usr/local/kubernetes/bin

- Download the command files to the installation directory

Server with Internet Access:
wget https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/crictl-v1.25.0-linux-amd64.tar.gz
tar -zxvf crictl-v1.25.0-linux-amd64.tar.gz -C /usr/local/kubernetes/bin
curl -o /usr/local/kubernetes/bin/kubeadm https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubeadm
curl -o /usr/local/kubernetes/bin/kubelet https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubelet
curl -o /usr/local/kubernetes/bin/kubectl https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubectl

Server without Internet Access:

# Download link for the crictl file. After downloading, upload it to the target server and extract it to the /usr/local/kubernetes/bin directory
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/crictl-v1.25.0-linux-amd64.tar.gz
tar -zxvf crictl-v1.25.0-linux-amd64.tar.gz -C /usr/local/kubernetes/bin
# Download link for kubeadm file. After downloading, upload to the /usr/local/kubernetes/bin/ directory on the target server
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubeadm
# Download link for kubelet file. After downloading, upload to the /usr/local/kubernetes/bin/ directory on the target server
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubelet
# Download link for kubectl file. After downloading, upload to the /usr/local/kubernetes/bin/ directory on the target server
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubectl

- Grant executable permissions to the command files

chmod +x /usr/local/kubernetes/bin/*
chown $(whoami):$(id -gn) /usr/local/kubernetes/bin/*

- Configure systemd to manage kubelet
cat > /etc/systemd/system/kubelet.service <<\EOF
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/home/
Wants=network-online.target
After=network-online.target
[Service]
ExecStart=/usr/local/kubernetes/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF

- Configure systemd to manage kubeadm
mkdir -p /etc/systemd/system/kubelet.service.d
cat > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf <<\EOF
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/local/kubernetes/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
EOF

- Start kubelet and enable it to start on boot

systemctl daemon-reload && systemctl restart kubelet && systemctl enable kubelet

- It is not necessary to check the service status after this restart; the service will come back up automatically after the subsequent kubeadm init and kubeadm join steps.
- Add the directory of K8S commands to the PATH environment variable

CentOS:

export PATH=/usr/local/kubernetes/bin/:$PATH
echo 'export PATH=/usr/local/kubernetes/bin/:$PATH' >> /etc/bashrc

Debian:

export PATH=/usr/local/kubernetes/bin/:$PATH
echo 'export PATH=/usr/local/kubernetes/bin/:$PATH' >> /etc/bash.bashrc

- Configure crictl to prevent image pull errors
crictl config runtime-endpoint unix:///run/containerd/containerd.sock
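To confirm crictl can reach containerd through this endpoint, a quick status query can be run; RuntimeReady should report status true:

crictl version
crictl info | grep -A 2 RuntimeReady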
Install Environment Dependencies
Each node of the Kubernetes cluster requires this operation
- Install the environment dependencies socat and conntrack

Server with Internet Access:

# Use yum for CentOS / RedHat
yum install -y socat conntrack-tools
# Use apt for Debian / Ubuntu
apt install -y socat conntrack

Server without Internet Access:

# Download link for the socat package. After downloading, upload it to the target server. (Packages are built for CentOS 7.9; if the dependencies do not match, download them again for your system)
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/socat-deps-centos7.tar.gz
# Extract and install
tar -zxvf socat-deps-centos7.tar.gz
rpm -Uvh --nodeps socat-deps-centos7/*.rpm
# Download link for the conntrack file package. After downloading, upload to the target server. (Using CentOS 7.9 here; if dependencies do not match, download again)
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/conntrack-tools-deps-centos7.tar.gz
# Extract and install
tar -zxvf conntrack-tools-deps-centos7.tar.gz
rpm -Uvh --nodeps conntrack-tools-deps-centos7/*.rpm

- Check whether any commands are missing

docker --version && dockerd --version && pgrep -f 'dockerd' && crictl --version && kubeadm version && kubelet --version && kubectl version --client=true && socat -V | grep 'socat version' && conntrack --version && echo ok || echo error

- An output of "ok" means everything is in place; "error" means the missing commands must be installed based on the error messages.
Modify Kernel Configuration
Each node of the Kubernetes cluster requires this operation
- Add kernel modules
cat > /etc/modules-load.d/kubernetes.conf <<EOF
overlay
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF

- Load the modules
modprobe overlay
modprobe br_netfilter
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
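To confirm the modules are loaded (they will also load on boot via the kubernetes.conf file created above), list them with lsmod:

lsmod | grep -E 'overlay|br_netfilter|ip_vs'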

- Add kernel parameters
cat >> /etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
vm.max_map_count = 262144
# MD Config
net.nf_conntrack_max = 524288
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 8192 87380 16777216
net.ipv4.tcp_wmem = 8192 65536 16777216
net.ipv4.tcp_max_syn_backlog = 32768
net.core.netdev_max_backlog = 32768
net.core.netdev_budget = 600
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 2
net.ipv4.tcp_mem = 8388608 12582912 16777216
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_max_orphans = 16384
net.ipv4.tcp_keepalive_intvl = 10
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_time = 600
vm.max_map_count = 262144
net.netfilter.nf_conntrack_tcp_be_liberal = 0
net.netfilter.nf_conntrack_tcp_max_retrans = 3
net.netfilter.nf_conntrack_tcp_timeout_max_retrans = 300
net.netfilter.nf_conntrack_tcp_timeout_established = 86400
fs.inotify.max_user_watches=10485760
fs.inotify.max_user_instances=10240
EOF
sysctl --system
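Optionally, read back the most critical parameters to confirm they took effect:

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward vm.max_map_count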
K8S Environment Image Preparation
Each node of the Kubernetes cluster requires this operation
- Load the offline images

Server with Internet Access:

wget https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubeadm-1.25.4-images.tar.gz
docker load -i kubeadm-1.25.4-images.tar.gz

Server without Internet Access:

# Download link for the offline image package. After downloading, upload it to the target server and load the images
https://pdpublic.mingdao.com/private-deployment/offline/common/kubernetes-1.25.4/kubeadm-1.25.4-images.tar.gz
docker load -i kubeadm-1.25.4-images.tar.gz

- Start the local registry, then tag and push the images
docker run -d -p 5000:5000 --restart always --name registry registry:2
for i in $(docker images | grep 'registry.k8s.io\|rancher' | awk 'NR!=0{print $1":"$2}');do docker tag $i $(echo $i | sed -e "s/registry.k8s.io/127.0.0.1:5000/" -e "s#coredns/##" -e "s/rancher/127.0.0.1:5000/");done
for i in $(docker images | grep :5000 | awk 'NR!=0{print $1":"$2}');do docker push $i;done
docker images | grep :5000
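The registry's HTTP API can also be queried to confirm the push succeeded; the repository list should include pause, etcd, coredns, and the kube-* components:

curl -s http://127.0.0.1:5000/v2/_catalog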
Master Node Configuration
Perform operations only on Kubernetes node 01
- Initialize the master node

kubeadm init --cri-socket unix:///var/run/containerd/containerd.sock -v 5 --kubernetes-version=1.25.4 --image-repository=127.0.0.1:5000 --pod-network-cidr=10.244.0.0/16

- Upon successful initialization, the kubeadm join command is printed. Save this output, as it will be needed later.

- Modify the range of usable nodePort ports
sed -i '/- kube-apiserver/a\ \ \ \ - --service-node-port-range=1024-32767' /etc/kubernetes/manifests/kube-apiserver.yaml
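You can confirm the flag was written into the static Pod manifest; the kube-apiserver Pod restarts automatically once the file changes:

grep service-node-port-range /etc/kubernetes/manifests/kube-apiserver.yaml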
- Set the kubeconfig path

CentOS:

export KUBECONFIG=/etc/kubernetes/admin.conf
echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /etc/bashrc

Debian:

export KUBECONFIG=/etc/kubernetes/admin.conf
echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /etc/bash.bashrc

- Adjust the Pod limit on the current node

echo "maxPods: 300" >> /var/lib/kubelet/config.yaml
systemctl restart kubelet
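If desired, confirm the new limit once the kubelet has re-registered; the node's reported pod capacity should show 300:

kubectl get node -o jsonpath='{.items[0].status.capacity.pods}{"\n"}'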

- Allow the master node to participate in scheduling

- You may need to wait approximately two minutes after initializing the master node before executing the command below
- Before executing, check the status of the kubelet service with systemctl status kubelet to ensure it is running

kubectl taint node $(kubectl get node | grep control-plane | awk '{print $1}') node-role.kubernetes.io/control-plane:NoSchedule-

- The expected output of this command is "xxxx untainted". If the output differs, wait briefly and execute it again to confirm

- Install the network plugin
cat > /usr/local/kubernetes/kube-flannel.yml <<EOF
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: 127.0.0.1:5000/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
#image: flannelcni/flannel:v0.20.1 for ppc64le and mips64le (dockerhub limitations may apply)
image: 127.0.0.1:5000/mirrored-flannelcni-flannel:v0.20.1
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
#image: flannelcni/flannel:v0.20.1 for ppc64le and mips64le (dockerhub limitations may apply)
image: 127.0.0.1:5000/mirrored-flannelcni-flannel:v0.20.1
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /usr/local/kubernetes/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
EOF
kubectl apply -f /usr/local/kubernetes/kube-flannel.yml
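After a short wait, the flannel DaemonSet pods should be Running on every node; this can be checked with:

kubectl -n kube-system get pod -l app=flannel -o wide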
Worker Node Configuration
Operations to be performed on Kubernetes node 02
- Join the Kubernetes cluster

kubeadm join 192.168.10.20:6443 --token 3nwjzw.pdod3r27lnqqhi0x \
    --discovery-token-ca-cert-hash sha256:a84445303a0f8249e7eae3059cb99d46038dc275b2dc2043a022de187a1175a2

- This command is output after a successful kubeadm init on the master node; the token and hash shown here are examples and are unique to each cluster
- If you no longer have it, re-acquire it by executing kubeadm token create --print-join-command on the master node

- Adjust the Pod limit on the current node
echo "maxPods: 300" >> /var/lib/kubelet/config.yaml
systemctl restart kubelet
Cluster Status Check
- Check node status

kubectl get pod -n kube-system # The READY column should be "1/1"
kubectl get node # The STATUS column should be "Ready"

- Download the image (to be performed on each microservice node)

Download and upload the centos:7.9.2009 image to each server in advance.
Offline image download link: https://pdpublic.mingdao.com/private-deployment/offline/common/centos7.9.2009.tar.gz
Load the offline image on each server:

gunzip -d centos7.9.2009.tar.gz
ctr -n k8s.io image import centos7.9.2009.tar

- Write the configuration and start the test container (only on microservice node 01)
cat > /usr/local/kubernetes/test.yaml <<\EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: test
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: test
template:
metadata:
labels:
app: test
annotations:
md-update: '20200517104741'
spec:
containers:
- name: test
image: centos:7.9.2009
command:
- sh
- -c
- |
echo $(hostname) > hostname.txt
python -m SimpleHTTPServer
resources:
limits:
memory: 512Mi
cpu: 1
requests:
memory: 64Mi
cpu: 0.01
volumeMounts:
- name: tz-config
mountPath: /etc/localtime
volumes:
- name: tz-config
hostPath:
path: /usr/share/zoneinfo/Etc/GMT-8
---
apiVersion: v1
kind: Service
metadata:
name: test
namespace: default
spec:
selector:
app: test
ports:
- name: external-test
port: 8000
targetPort: 8000
nodePort: 8000
type: NodePort
EOF
kubectl apply -f /usr/local/kubernetes/test.yaml

- Check Pod status

kubectl get pod -o wide

- Test access

curl 127.0.0.1:8000/hostname.txt

- Running curl multiple times should return hostnames from different pods

- If curl requests to containers on other nodes take about 1 second, disable the offload feature of the flannel.1 network interface (this must be done on each node)
cat > /etc/systemd/system/disable-offload.service <<\EOF
[Unit]
Description=Disable offload for flannel.1
After=network-online.target flanneld.service
[Service]
Type=oneshot
ExecStartPre=/bin/bash -c 'while [ ! -d /sys/class/net/flannel.1 ]; do sleep 1; done'
ExecStart=/sbin/ethtool --offload flannel.1 rx off tx off
[Install]
WantedBy=multi-user.target
EOF

Reload the systemd configuration and start the service:
systemctl daemon-reload
systemctl enable disable-offload
systemctl start disable-offload
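Whether the change took effect can be verified with ethtool; rx and tx checksumming should now both report "off":

ethtool --show-offload flannel.1 | grep -E '^(rx|tx)-checksumming'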