1. Uninstall steps
# Uninstall: reset everything kubeadm init / kubeadm join did on this node
kubeadm reset
# Cleanup: run the reset without a confirmation prompt
kubeadm reset -f
# Unload the ipip kernel module used by Calico and confirm it is gone
modprobe -r ipip
lsmod
# Remove leftover kubeconfig, cluster config, kubelet units, binaries, CNI config and etcd data
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
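Depending on the network plugin that was installed, stale iptables/IPVS rules and CNI interfaces can also survive the reset. The optional cleanup below is only a sketch of what is commonly run afterwards; check that each step makes sense for your environment, and note that the interface names in particular depend on the plugin:
# Flush packet-filter and NAT rules left behind by kube-proxy / the CNI plugin
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# If kube-proxy ran in IPVS mode and ipvsadm is installed, clear the virtual server table
ipvsadm --clear
# Remove leftover CNI interfaces (names vary: cni0, flannel.1, tunl0, ...)
ip link delete cni0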
2. Process list
[root@teckbootcamps ~] ps -ef|grep kube
root 8395 26979 0 18:03 pts/1 00:00:00 grep --color=auto kube
root 20501 1 2 13:42 ? 00:06:50 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt --cadvisor-port=0 --cgroup-driver=systemd --rotate-certificates=true --cert-dir=/var/lib/kubelet/pki
root 20744 20728 0 13:42 ? 00:02:26 etcd --advertise-client-urls=https://127.0.0.1:2379 --cert-file=/etc/kubernetes/pki/etcd/server.crt --client-cert-auth=true --data-dir=/var/lib/etcd --initial-advertise-peer-urls=https://127.0.0.1:2380 --initial-cluster=teckbootcamps=https://127.0.0.1:2380 --key-file=/etc/kubernetes/pki/etcd/server.key --listen-client-urls=https://127.0.0.1:2379 --listen-peer-urls=https://127.0.0.1:2380 --name=teckbootcamps --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt --peer-client-cert-auth=true --peer-key-file=/etc/kubernetes/pki/etcd/peer.key --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt --snapshot-count=10000 --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
root 20793 20745 1 13:42 ? 00:03:56 kube-controller-manager --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=192.168.0.0/16 --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt --cluster-signing-key-file=/etc/kubernetes/pki/ca.key --controllers=*,bootstrapsigner,tokencleaner --kubeconfig=/etc/kubernetes/controller-manager.conf --leader-elect=true --node-cidr-mask-size=24 --root-ca-file=/etc/kubernetes/pki/ca.crt --service-account-private-key-file=/etc/kubernetes/pki/sa.key --use-service-account-credentials=true
root 20806 20746 1 13:42 ? 00:04:47 kube-apiserver --authorization-mode=Node,RBAC --advertise-address=172.17.211.142 --allow-privileged=true --client-ca-file=/etc/kubernetes/pki/ca.crt --disable-admission-plugins=PersistentVolumeLabel --enable-admission-plugins=NodeRestriction --enable-bootstrap-token-auth=true --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-key-file=/etc/kubernetes/pki/sa.pub --service-cluster-ip-range=10.96.0.0/12 --tls-cert-file=/etc/kubernetes/pki/apiserver.crt --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
root 20814 20760 0 13:42 ? 00:01:18 kube-scheduler --address=127.0.0.1 --kubeconfig=/etc/kubernetes/scheduler.conf --leader-elect=true
root 21095 21071 0 13:43 ? 00:00:22 /usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf
root 22065 22047 0 13:43 ? 00:00:03 /usr/bin/kube-controllers
65534 22166 22137 0 13:43 ? 00:00:12 /heapster --source=kubernetes:https://kubernetes.default --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
3. Restart method
[root@teckbootcamps ~]# swapoff -a && systemctl stop kubelet
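The command above disables swap and stops the kubelet. To bring the node back after maintenance, a typical sequence (assuming the systemd-managed kubelet that kubeadm installs) is:
# Reload unit files in case the kubelet drop-ins changed, then restart and verify
systemctl daemon-reload
systemctl restart kubelet
systemctl status kubelet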
4. Commonly used commands
1. View cluster-info
[root@teckbootcamps /]kubectl cluster-info
Kubernetes master is running at https://172.17.211.142:6443
Heapster is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/heapster/proxy
KubeDNS is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
monitoring-grafana is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
monitoring-influxdb is running at https://172.17.211.142:6443/api/v1/namespaces/kube-system/services/monitoring-influxdb/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
2. View the cluster-info dump
[root@teckbootcamps ~]kubectl cluster-info dump
{
"kind": "NodeList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/nodes",
"resourceVersion": "35732"
},
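The dump is large; it can also be written to a directory and limited to specific namespaces instead of printing everything to stdout:
# Write the dump to files under /tmp/cluster-dump (one file per resource and per pod log)
kubectl cluster-info dump --output-directory=/tmp/cluster-dump
# Only dump the kube-system namespace
kubectl cluster-info dump --namespaces kube-system --output-directory=/tmp/cluster-dump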
3. View deployment
[root@teckbootcamps ~]kubectl -n kube-system get deployments
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
calico-kube-controllers 1 1 1 1 98d
coredns 2 2 2 2 98d
heapster 1 1 1 1 98d
heapster-teckbootcamps 1 0 0 0 1h
heapster-teckbootcamps2 1 0 0 0 1h
kubernetes-dashboard 1 1 1 1 98d
monitoring-grafana 1 1 1 1 98d
monitoring-grafana-teckbootcamps 1 0 0 0 1h
monitoring-influxdb 1 1 1 1 98d
monitoring-influxdb-teckbootcamps 1 0 0 0 2h
4. Delete deployment
[root@teckbootcamps ~]kubectl -n kube-system delete deployment heapster-teckbootcamps
deployment.extensions "heapster-teckbootcamps" deleted
5. View services
[root@teckbootcamps shell]kubectl -n kube-system get svc -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
heapster ClusterIP 10.106.70.78 <none> 80/TCP 23h k8s-app=heapster
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 23h k8s-app=kube-dns
kubelet ClusterIP None <none> 10250/TCP 23h <none>
kubernetes-dashboard NodePort 10.110.202.105 <none> 443:32000/TCP 23h k8s-app=kubernetes-dashboard
monitoring-grafana NodePort 10.98.68.122 <none> 80:32001/TCP 23h k8s-app=grafana
monitoring-influxdb ClusterIP 10.104.109.169 <none> 8086/TCP 23h k8s-app=influxdb
6. View nodes
[root@teckbootcamps ~]kubectl get nodes
NAME STATUS ROLES AGE VERSION
teckbootcamps Ready <none> 90d v1.26.0
teckbootcamps2 Ready <none> 90d v1.26.0
teckbootcamps3 Ready master 98d v1.26.0
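The worker nodes above report ROLES as <none>. kubectl derives the role from node labels, so a role can be attached by hand if you want it displayed; the label key below follows the usual node-role convention (adjust the node name to your own):
kubectl label node teckbootcamps node-role.kubernetes.io/worker=worker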
7. View Service Account
[root@teckbootcamps3 ~]kubectl get sa --all-namespaces
NAMESPACE NAME SECRETS AGE
default default 1 98d
kube-public default 1 98d
kube-system attachdetach-controller 1 98d
kube-system bootstrap-signer 1 98d
kube-system calico-cni-plugin 1 98d
kube-system calico-kube-controllers 1 98d
kube-system certificate-controller 1 98d
kube-system clusterrole-aggregation-controller 1 98d
kube-system coredns 1 98d
kube-system cronjob-controller 1 98d
kube-system daemon-set-controller 1 98d
kube-system default 1 98d
kube-system deployment-controller 1 98d
kube-system disruption-controller 1 98d
kube-system endpoint-controller 1 98d
kube-system expand-controller 1 98d
kube-system generic-garbage-collector 1 98d
kube-system heapster 1 98d
kube-system heapster-teckbootcamps 0 5m
kube-system horizontal-pod-autoscaler 1 98d
kube-system job-controller 1 98d
kube-system kube-proxy 1 98d
kube-system kubernetes-dashboard 1 98d
kube-system namespace-controller 1 98d
kube-system node-controller 1 98d
kube-system persistent-volume-binder 1 98d
kube-system pod-garbage-collector 1 98d
kube-system pv-protection-controller 1 98d
kube-system pvc-protection-controller 1 98d
kube-system replicaset-controller 1 98d
kube-system replication-controller 1 98d
kube-system resourcequota-controller 1 98d
kube-system service-account-controller 1 98d
kube-system service-controller 1 98d
kube-system statefulset-controller 1 98d
kube-system token-cleaner 1 98d
kube-system ttl-controller 1 98d
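Service accounts for your own workloads are created with kubectl create serviceaccount; the account and namespace names below are only examples:
kubectl create serviceaccount my-app -n default
kubectl describe sa my-app -n default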
9. View cluster DNS Service information
[root@teckbootcamps /]kubectl get service -l k8s-app=kube-dns --namespace=kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP 12d
10. View cluster DNS pods (CoreDNS)
[root@teckbootcamps /]# kubectl get pod --selector k8s-app=kube-dns --namespace=kube-system
NAME READY STATUS RESTARTS AGE
coredns-78fcdf6894-m7rgl 1/1 Running 0 3d
coredns-78fcdf6894-tpkql 1/1 Running 0 3d
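A quick way to confirm that cluster DNS actually resolves service names is to start a throwaway pod and query it from inside (busybox:1.28 is used here because nslookup in newer busybox images behaves oddly against Kubernetes DNS):
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default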
12. View components
[root@teckbootcamps /] kubectl -s https://172.17.211.142:6443 get componentstatus
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health": "true"}
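componentstatus is deprecated in recent Kubernetes releases; on newer clusters the API server health endpoints report similar information:
kubectl get --raw='/readyz?verbose'
kubectl get --raw='/livez?verbose'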
13. View endpoint
[root@teckbootcamps shell] kubectl get endpoints
NAME ENDPOINTS AGE
kubernetes 172.17.211.142:6443 23h
14. View node list
[root@teckbootcamps /]kubectl -s https://172.17.211.142:6443 get nodes
NAME STATUS ROLES AGE VERSION
teckbootcamps Ready <none> 3d v1.11.0
teckbootcamps2 Ready <none> 3d v1.11.0
teckbootcamps3 Ready master 12d v1.11.0
15. View node details
[root@teckbootcamps shell]# kubectl get node
NAME STATUS ROLES AGE VERSION
teckbootcamps Ready <none> 17m v1.14.4
teckbootcamps2 Ready <none> 13h v1.14.4
teckbootcamps3 Ready master 23h v1.14.4
[root@teckbootcamps shell] kubectl describe node teckbootcamps
Name: teckbootcamps
Roles: <none>
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=teckbootcamps
kubernetes.io/os=linux
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
node.alpha.kubernetes.io/ttl: 0
projectcalico.org/IPv4Address: 172.17.211.143/20
projectcalico.org/IPv4IPIPTunnelAddr: 100.67.134.64
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 28 Jul 2019 10:51:34 +0800
Taints: <none>
Unschedulable: false
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
NetworkUnavailable False Sun, 28 Jul 2019 10:51:58 +0800 Sun, 28 Jul 2019 10:51:58 +0800 CalicoIsUp Calico is running on this node
MemoryPressure False Sun, 28 Jul 2019 11:09:15 +0800 Sun, 28 Jul 2019 10:51:33 +0800 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sun, 28 Jul 2019 11:09:15 +0800 Sun, 28 Jul 2019 10:51:33 +0800 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sun, 28 Jul 2019 11:09:15 +0800 Sun, 28 Jul 2019 10:51:33 +0800 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sun, 28 Jul 2019 11:09:15 +0800 Sun, 28 Jul 2019 10:52:04 +0800 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 172.17.211.143
Hostname: teckbootcamps
Capacity:
cpu: 2
ephemeral-storage: 41152832Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 3882308Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 37926449909
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 3779908Ki
pods: 110
System Info:
Machine ID: 7d26c16f128042a684ea474c9e2c240f
System UUID: 09D50368-65D8-41BD-A923-FBCF9B8851AB
Boot ID: acc62473-6237-49e9-8bf8-222771e267e1
Kernel Version: 3.10.0-327.28.2.el7.x86_64
OS Image: CentOS Linux 7 (Core)
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://18.6.1
Kubelet Version: v1.14.4
Kube-Proxy Version: v1.14.4
PodCIDR: 100.64.2.0/24
Non-terminated Pods: (4 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system calico-node-stc89 250m (12%) 0 (0%) 0 (0%) 0 (0%) 18m
kube-system kube-proxy-qzplb 0 (0%) 0 (0%) 0 (0%) 0 (0%) 18m
kube-system kube-sealyun-lvscare-teckbootcamps 0 (0%) 0 (0%) 0 (0%) 0 (0%) 18m
monitoring node-exporter-tz6ms 112m (5%) 270m (13%) 200Mi (5%) 240Mi (6%) 18m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 362m (18%) 270m (13%)
memory 200Mi (5%) 240Mi (6%)
ephemeral-storage 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 18m kubelet, teckbootcamps Starting kubelet.
Normal NodeHasSufficientMemory 18m kubelet, teckbootcamps Node teckbootcamps status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 18m kubelet, teckbootcamps Node teckbootcamps status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 18m kubelet, teckbootcamps Node teckbootcamps status is now: NodeHasSufficientPID
Normal Starting 18m kube-proxy, teckbootcamps Starting kube-proxy.
Normal NodeReady 17m kubelet, teckbootcamps Node teckbootcamps status is now: NodeReady
Warning ImageGCFailed 13m kubelet, teckbootcamps wanted to free 1747577241 bytes, but freed 1771511194 bytes space with errors in image deletion: [rpc error: code = Unknown desc = Error response from daemon: conflict: unable to delete abf312888d13 (must be forced) - image is being used by stopped container e5285e77b550, rpc error: code = Unknown desc = Error response from daemon: conflict: unable to remove repository reference "tutum/influxdb:0.13" (must force) - container c986b59b91ed is using its referenced image 39fa42a093e0, rpc error: code = Unknown desc = Error response from daemon: conflict: unable to remove repository reference "teckbootcamps-tomcat8-2:latest" (must force) - container ddc7e49946f1 is using its referenced image c375edce8dfd, rpc error: code = Unknown desc = Error response from daemon: conflict: unable to remove repository reference "teckbootcamps-tomcat8:latest" (must force) - container f627e4cb0dbc is using its referenced image 7e69e1b21246]
Warning FailedNodeAllocatableEnforcement 27s (x19 over 18m) kubelet, teckbootcamps Failed to update Node Allocatable Limits ["kubepods"]: failed to set supported cgroup subsystems for cgroup [kubepods]: Failed to find subsystem mount for required subsystem: pids
16. View the kubeconfig (kubectl client configuration)
[root@teckbootcamps shell] kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://172.17.211.142:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
17. Check the Kubernetes version
[root@teckbootcamps kubernets]kubelet --version
Kubernetes v1.28
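kubelet --version only reports the local kubelet build. The client and server versions of the other components can be checked as well:
kubectl version     # kubectl client version and API server version
kubeadm version     # kubeadm build version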
18. View config
[root@teckbootcamps ~]kubeadm config view
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.14.4
networking:
  dnsDomain: cluster.local
  podSubnet: 100.64.0.0/10
  serviceSubnet: 10.96.0.0/12
scheduler: {}
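kubeadm config view has been removed in newer kubeadm releases; the same cluster configuration is kept in the kubeadm-config ConfigMap and can be read with kubectl:
kubectl -n kube-system get configmap kubeadm-config -o yaml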
19. List the required images
[root@teckbootcamps ~] kubeadm config images list
W0728 10:09:45.567500 28248 version.go:98] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get https://dl.k8s.io/release/stable-1.txt: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
W0728 10:09:45.567584 28248 version.go:99] falling back to the local client version: v1.15.0
k8s.gcr.io/kube-apiserver:v1.15.0
k8s.gcr.io/kube-controller-manager:v1.15.0
k8s.gcr.io/kube-scheduler:v1.15.0
k8s.gcr.io/kube-proxy:v1.15.0
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1
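The images can be pulled ahead of kubeadm init/join, and an alternative repository can be given when k8s.gcr.io is unreachable (the mirror below is just an example, use whichever registry you trust):
kubeadm config images pull
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers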
20. View the default initialization parameter configuration
[root@teckbootcamps ~]kubeadm config print init-defaults
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 1.2.3.4
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: teckbootcamps
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}
21. View pod logs
1. View the logs of the specified pod
kubectl logs <pod_name>
kubectl logs -f <pod_name>
# Follow the log output in real time (similar to tail -f on a log file)
2. View the logs of the specified container in the specified pod
kubectl logs <pod_name> -c <container_name>
PS: View Docker container logs
docker logs <container_id>
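A few commonly used kubectl logs options:
kubectl logs <pod_name> --tail=100        # only the last 100 lines
kubectl logs <pod_name> --since=1h        # only log entries from the last hour
kubectl logs <pod_name> --previous        # logs of the previous (crashed) container instance
kubectl logs <pod_name> --all-containers  # logs of every container in the pod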
22. View the yaml file of the pod
# Print the full yaml definition of a pod
kubectl get pod <pod-name> -n <ns-name> -o yaml
For example:
[root@teckbootcamps shell]kubectl get pod -n kube-system kube-apiserver-teckbootcamps -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubernetes.io/config.hash: 4c09c523e34dd307dbfa1702d7e5f326
    kubernetes.io/config.mirror: 4c09c523e34dd307dbfa1702d7e5f326
    kubernetes.io/config.seen: "2019-07-27T11:32:59.183084282+08:00"
    kubernetes.io/config.source: file
  creationTimestamp: "2019-07-27T03:34:32Z"
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver-teckbootcamps
  namespace: kube-system
  resourceVersion: "41809"
  selfLink: /api/v1/namespaces/kube-system/pods/kube-apiserver-teckbootcamps
  uid: 72b76059-b01f-11e9-9ad8-00163e06971e
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=172.17.211.142
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: k8s.gcr.io/kube-apiserver:v1.14.4
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 172.17.211.142
        path: /healthz
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 15
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 15
    name: kube-apiserver
    resources:
      requests:
        cpu: 250m
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostNetwork: true
  nodeName: teckbootcamps
  priority: 2000000000
  priorityClassName: system-cluster-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    operator: Exists
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2019-07-27T05:08:34Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2019-07-27T13:48:08Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2019-07-27T13:48:08Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2019-07-27T05:08:34Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://b6302cc9c9e453d20ad2a86b382bdeb4c274002b0c9233f4a33c77e363e8874d
    image: k8s.gcr.io/kube-apiserver:v1.14.4
    imageID: docker://sha256:f3171d49fa9b9e0ddbb35fb156689aa6b07ba257c313d7de429d9f9f04585c20
    lastState:
      terminated:
        containerID: docker://e5c1fd31b62795c30707ed1b0054773f4b5655754102ce11df2964d225b45f68
        exitCode: 255
        finishedAt: "2019-07-27T13:43:07Z"
        reason: Error
        startedAt: "2019-07-27T13:41:41Z"
    name: kube-apiserver
    ready: true
    restartCount: 10
    state:
      running:
        startedAt: "2019-07-27T13:48:08Z"
  hostIP: 172.17.211.142
  phase: Running
  podIP: 172.17.211.142
  qosClass: Burstable
  startTime: "2019-07-27T05:08:34Z"
23. Log in to the container
When exec-ing into a container, pay attention to which shells the container image actually provides.
kubectl exec -it <pod-name> -n <ns-name> bash
kubectl exec -it <pod-name> -n <ns-name> sh
[root@teckbootcamps shell] kubectl exec -it monitoring-grafana-95cbdd789-fzl49 -n kube-system /bin/sh
/ # ls
bin dashboards dev etc home proc root run.sh sys tmp usr var
If you get an error like the following, the requested shell does not exist in the container; try sh instead of bash (or vice versa):
kubectl OCI runtime exec failed: exec failed: container_linux.go:345: starting container process ca
24. Create resources based on yaml
# Create resources from a yaml file. kubectl apply is idempotent and can be re-run; kubectl create fails if the resource already exists.
kubectl create -f pod.yaml
kubectl apply -f pod.yaml
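A manifest can be validated without touching the cluster by combining a client-side dry run with -o yaml (older kubectl versions use plain --dry-run), and kubectl diff shows what apply would change:
kubectl apply -f pod.yaml --dry-run=client -o yaml
kubectl diff -f pod.yaml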
25. Delete pod according to yaml
# Delete a pod based on the name defined in pod.yaml
kubectl delete -f pod.yaml
26. Delete pod and service according to label
# Delete all pods and services that carry a given label
kubectl delete pod,svc -l name=<label-name>
27. Delete pod
[root@teckbootcamps ~]kubectl get pods
NAME READY STATUS RESTARTS AGE
frontend-2szjk 1/1 Running 0 3d1h
frontend-cv5qw 1/1 Running 0 3d1h
frontend-lp4tc 0/1 Evicted 0 3d2h
frontend-sccqj 1/1 Running 0 2d7h
redis-master-6ssmn 1/1 Running 3 3d1h
redis-slave-6vtrs 1/1 Running 1 3d2h
[root@teckbootcamps ~]kubectl delete pod frontend-lp4tc
pod "frontend-lp4tc" deleted
[root@teckbootcamps ~]kubectl get pods
NAME READY STATUS RESTARTS AGE
frontend-2szjk 1/1 Running 0 3d1h
frontend-cv5qw 1/1 Running 0 3d1h
frontend-sccqj 1/1 Running 0 2d7h
redis-master-6ssmn 1/1 Running 3 3d1h
redis-slave-6vtrs 1/1 Running 1 3d2h
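If a pod gets stuck in Terminating (or Evicted pods pile up), it can be removed immediately by skipping the grace period; use this with care, because the kubelet may still be tearing the container down:
kubectl delete pod <pod_name> --grace-period=0 --force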
28. Check resource usage of nodes or pods
[root@teckbootcamps ~]kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
teckbootcamps 129m 6% 1567Mi 42%
teckbootcamps2 233m 11% 1811Mi 49%
teckbootcamps3 510m 25% 2651Mi 71%
[root@teckbootcamps3 ~]kubectl top pod
NAME CPU(cores) MEMORY(bytes)
frontend-2szjk 0m 16Mi
frontend-cv5qw 0m 16Mi
frontend-sccqj 0m 21Mi
redis-master-6ssmn 0m 1Mi
redis-slave-6vtrs 1m 8Mi
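kubectl top only works while a metrics pipeline is running (Heapster on this older cluster, metrics-server on current ones). Useful variations:
kubectl top pod --all-namespaces                      # pods in every namespace
kubectl top pod --containers                          # per-container usage
kubectl top pod -n kube-system -l k8s-app=kube-dns    # filter by namespace and label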
29. Edit the yaml of a deployment
# Edit a deployment's yaml definition
kubectl get deployment -n <ns-name>
kubectl edit deployment <deployment-name> -n <ns-name> -o yaml
Examples are as follows:
[root@teckbootcamps shell] kubectl get deployment -n kube-system
NAME READY UP-TO-DATE AVAILABLE AGE
calico-kube-controllers 1/1 1 1 23h
coredns 2/2 2 2 23h
heapster 1/1 1 1 23h
kubernetes-dashboard 1/1 1 1 23h
monitoring-grafana 1/1 1 1 23h
monitoring-influxdb 1/1 1 1 23h
[root@teckbootcamps shell]kubectl edit deployment monitoring-grafana -n kube-system -o yaml
30. Enter POD
[root@teckbootcamps data]kubectl exec -it monitoring-grafana-95cbdd789-fzl49 -n kube-system sh
/ # ls
bin dashboards dev etc home proc root run.sh sys tmp usr var
5. Configuration file directory
[root@teckbootcamps kubernetes]pwd
/etc/kubernetes
[root@teckbootcamps kubernetes] tree -h
.
├── [5.3K] admin.conf
├── [5.4K] controller-manager.conf
├── [5.3K] kubelet.conf
├── [4.0K] manifests
│   ├── [1.9K] etcd.yaml
│   ├── [2.5K] kube-apiserver.yaml
│   ├── [2.2K] kube-controller-manager.yaml
│   └── [ 990] kube-scheduler.yaml
├── [4.0K] pki
│   ├── [1.2K] apiserver.crt
│   ├── [1.1K] apiserver-etcd-client.crt
│   ├── [1.6K] apiserver-etcd-client.key
│   ├── [1.6K] apiserver.key
│   ├── [1.1K] apiserver-kubelet-client.crt
│   ├── [1.6K] apiserver-kubelet-client.key
│   ├── [1.0K] ca.crt
│   ├── [1.6K] ca.key
│   ├── [4.0K] etcd
│   │   ├── [1021] ca.crt
│   │   ├── [1.6K] ca.key
│   │   ├── [1.1K] healthcheck-client.crt
│   │   ├── [1.6K] healthcheck-client.key
│   │   ├── [1.1K] peer.crt
│   │   ├── [1.6K] peer.key
│   │   ├── [1.1K] server.crt
│   │   └── [1.6K] server.key
│   ├── [1.0K] front-proxy-ca.crt
│   ├── [1.6K] front-proxy-ca.key
│   ├── [1.0K] front-proxy-client.crt
│   ├── [1.6K] front-proxy-client.key
│   ├── [1.6K] sa.key
│   └── [ 451] sa.pub
└── [5.3K] scheduler.conf
3 directories, 30 files
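All control-plane certificates live under /etc/kubernetes/pki, so it is worth checking their expiry from time to time. Recent kubeadm versions have this built in (older ones use kubeadm alpha certs check-expiration); openssl works everywhere:
kubeadm certs check-expiration
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -enddate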
6. Configure SSL
1. Generate SSL certificates
1.1. The CA certificate is generated as follows:
Zees-Air-2:ssl Zee$ openssl genrsa -des3 -passout pass:x -out dashboard.pass.key 2048
Generating RSA private key, 2048 bit long modulus
.....+++
........................+++
e is 65537 (0x10001)
Zees-Air-2:ssl Zee$ ll
total 32
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
Zees-Air-2:ssl Zee$ openssl rsa -passin pass:x -in dashboard.pass.key -out dashboard.key
writing RSA key
Zees-Air-2:ssl Zee$ ll
total 40
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:23 dashboard.key
Zees-Air-2:ssl Zee$ openssl req -new -key dashboard.key -out dashboard.csr
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [AU]:CN
State or Province Name (full name) [Some-State]:BeiJing
Locality Name (eg, city) []:BeiJing
Organization Name (eg, company) [Internet Widgits Pty Ltd]:teckbootcamps
Organizational Unit Name (eg, section) []:teckbootcamps
Common Name (e.g. server FQDN or YOUR name) []:teckbootcamps.com
Email Address []:
Please enter the following 'extra' attributes
to be sent with your certificate request
A challenge password []:
An optional company name []:
Zees-Air-2:ssl Zee$ ll
total 48
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:23 dashboard.key
-rw-r--r-- 1 Zee staff 1009 Nov 22 09:24 dashboard.csr
Zees-Air-2:ssl Zee$ openssl x509 -req -sha256 -days 365 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
Signature ok
subject=/C=CN/ST=BeiJing/L=BeiJing/O=teckbootcamps/OU=teckbootcamps/CN=teckbootcamps.com
Getting Private key
Zees-Air-2:ssl Zee$ ll
total 56
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:23 dashboard.key
-rw-r--r-- 1 Zee staff 1009 Nov 22 09:24 dashboard.csr
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:25 dashboard.crt
Zees-Air-2:ssl Zee$ openssl x509 -in dashboard.crt -out dashboard.pem
Zees-Air-2:ssl Zee$ ll
total 72
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:23 dashboard.key
-rw-r--r-- 1 Zee staff 1009 Nov 22 09:24 dashboard.csr
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:25 dashboard.crt
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:28 dashboard.out
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:28 dashboard.pem
1.2. The server certificate is generated as follows:
Zees-Air-2:ssl Zee$ openssl genrsa -out server.key 2048
Generating RSA private key, 2048 bit long modulus
..................................................................................................................................................................+++
.............................................+++
e is 65537 (0x10001)
Zees-Air-2:ssl Zee$ ll
total 72
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:23 dashboard.key
-rw-r--r-- 1 Zee staff 1009 Nov 22 09:24 dashboard.csr
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:25 dashboard.crt
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:28 dashboard.pem
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:54 server.key
Zees-Air-2:ssl Zee$ openssl req -new -key server.key -subj "/CN=teckbootcamps" -out server.csr
Zees-Air-2:ssl Zee$ ll
total 80
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:23 dashboard.key
-rw-r--r-- 1 Zee staff 1009 Nov 22 09:24 dashboard.csr
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:25 dashboard.crt
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:28 dashboard.pem
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:54 server.key
-rw-r--r-- 1 Zee staff 891 Nov 22 09:55 server.csr
Zees-Air-2:ssl Zee$ openssl x509 -req -in server.csr -CA dashboard.crt -CAkey dashboard.key -CAcreateserial -out server.crt -days 5000
Signature ok
subject=/CN=teckbootcamps
Getting CA Private Key
Zees-Air-2:ssl Zee$ ll
total 96
-rw-r--r-- 1 Zee staff 1751 Nov 22 09:23 dashboard.pass.key
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:23 dashboard.key
-rw-r--r-- 1 Zee staff 1009 Nov 22 09:24 dashboard.csr
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:25 dashboard.crt
-rw-r--r-- 1 Zee staff 1212 Nov 22 09:28 dashboard.pem
-rw-r--r-- 1 Zee staff 1679 Nov 22 09:54 server.key
-rw-r--r-- 1 Zee staff 891 Nov 22 09:55 server.csr
-rw-r--r-- 1 Zee staff 1094 Nov 22 09:56 server.crt
-rw-r--r-- 1 Zee staff 17 Nov 22 09:56 dashboard.srl
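These files are typically handed to the Kubernetes dashboard as a TLS secret. Assuming the dashboard runs in kube-system (as on this cluster) and expects the usual kubernetes-dashboard-certs secret, it could be created like this; adjust the secret name and namespace to your deployment:
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.crt --from-file=dashboard.key -n kube-system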