>> etcd 백업 & 복구
<백업-backup>
ETCDCTL_API=3 etcdctl \
--endpoints=https://127.0.0.1:2379 \
--cacert=<trusted-ca-file> \
--cert=<cert-file> \
--key=<key-file> \
snapshot save <backup-file-location>
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
snapshot save /data/etcd-snapshot.db
<복구 -restore>
ETCDCTL_API=3 etcdctl \
--data-dir <data-dir-location> \
snapshot restore snapshotdb
export ETCDCTL_API=3
etcdctl snapshot restore --data-dir /var/lib/etcd_new etcd-snapshotdb
>> POD 생성
--------------------------------------------------------------------
cluster: k8s
namespace name: ecommerce
pod Name: eshop-main
image: nginx:1.17
env: DB=mysql
--------------------------------------------------------------------
$> kubectl run eshop-main --image=nginx:1.17 --namespace ecommerce --env="DB=mysql"
>>multi-container Pod
$ sudo vi ./multi.yaml
apiVersion: v1
kind: Pod
metadata:
name: lab004
spec:
containers:
- image: nginx
name: nginx
- image: redis
name: redis
- image: memcached
name: memcached
>>Side-car Container
busybox 추가
- name: sidecar
image: busybox
args: [/bin/sh, -c, 'tail -n+1 -F /var/log/cart-app.log']
volumeMounts:
- name: varlog
mountPath: /var/log
volumeMounts:
- name: varlog
mountPath: /var/log/nginx/
volumes:
- emptyDir: {}
name: varlog
>> Rolling update
$> sudo kubectl create deployment nginx-app --image=nginx:1.11.10-alpine --replicas=3
$> sudo kubectl set image deployment nginx-app nginx=nginx:1.11.13-alpine --record
-상태 확인
$> kubectl rollout status deployment nginx-app
$> kubectl rollout history deployment nginx-app
-Rollback 하기
$> kubectl rollout undo deployment nginx-app
>>Node Selector
nodeSelector:
disktype: ssd
$> kubectl label node/cp-k8s disktype=ssd
root@cp-k8s:/data/k8s# k get node/cp-k8s -L disktype
NAME STATUS ROLES AGE VERSION DISKTYPE
cp-k8s Ready control-plane 26d v1.29.9 ssd
>> node 관리
- cordon & drain
$> kubectl drain <node-name> --ignore-daemonsets --force --delete-emptydir-data
$> sudo kubectl describe node cp-k8s | grep -i noschedule
>>Pod Log 추출 / CPU 사용량 높은 Pod 검색
$> sudo kubectl top pods -l name=overloaded-cpu --sort-by=cpu
- CPU 사용률 가장 높은 pod를 /var/CKA2022/cpu_load_pod.txt 에 기록한다.
$> sudo echo "POD_NAME" > /var/CKA2022/cpu_load_pod.txt
>>Init container를 포함한 pod 운영
- Pod 가 실행될 때 main 컨테이너가 동작하기 전에 먼저 실행되는 컨테이너
- 유틸리티 또는 설정 스크립트등을 포함할 수 있음
initContainers:
- name: init
image: busybox:1.28
command: ['sh', '-c', "touch /workdir/data.txt"]
volumeMounts:
- name: workdir
mountPath: "/workdir"
>>configmap
$> kubectl create configmap web-config -n ckad \
--from-literal=connection_string=localhost:80 \
--from-literal=external_url=cncf.io
$> sudo kubectl run web-pod --image=nginx:1.19.8-alpine --port=80 --dry-run=client -o yaml > web-pod.yaml
$> sudo vi web-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: web-pod
namespace: ckad
spec:
containers:
- image: nginx:1.19.8-alpine
name: web-pod
envFrom:
- configMapRef:
name: web-config
>> Secret 운영
$> kubectl create secret generic super-secret \
--from-literal=password=secretpass
ex) Create a Pod named pod-secrets-via-file, using the redis image, which mounts a secret named
super-secret at /secrets.
$> sudo vi pod-secret-via-file.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-secrets-via-file
spec:
containers:
- name: mypod
image: redis
volumeMounts:
- name: foo
mountPath: "/secrets"
volumes:
- name: foo
secret:
secretName: super-secret
$> kubectl apply -f pod-secret-via-file.yaml
ex) Create a second Pod named pod-secrets-via-env, using the redis image, which exports password as PASSWORD
$> vi pod-secret-via-env.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-secrets-via-env
spec:
containers:
- name: mycontainer
image: redis
env:
- name: PASSWORD
valueFrom:
secretKeyRef:
name: super-secret
key: password
$> kubectl apply -f pod-secret-via-env.yaml
>>Ingress 구성
- nginx Pod 생성
$> kubectl run nginx --image=nginx --labels=app=nginx -n ingress-nginx
$> kubectl expose -n ingress-nginx pod nginx --port=80 --target-port=80
- ingress 생성 (kubernetes community 참조)
$> vi app-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: ingress-nginx
name: app-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
kubernetes.io/ingress.class: nginx
spec:
rules:
- http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx
port:
number: 80
- path: /app
pathType: Prefix
backend:
service:
name: appjs
port:
number: 80
$> kubectl apply -f app-ingress.yaml
>> Persistent Volume 생성
ex) Create a persistent volume with name app-config, of capacity 1Gi and access mode ReadWriteMany.
- storageClass : az-c
- The type of volume is hostPath and its location is /srv/app-config
$> vi app-config-pv.yaml (kubernetes 커뮤니티 블로그 참조하여 생성)
apiVersion: v1
kind: PersistentVolume
metadata:
name: app-config
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
storageClassName: az-c
hostPath:
path: /srv/app-config
$> kubectl apply -f app-config-pv.yaml
>>Persistent Volume Claim을 사용하는 Pod 운영
ex)
- Create a new PersistentVolumeClaim:
Name: app-volume
StorageClass: app-hostpath-sc
Capacity: 10Mi
- Create a new Pod which mounts the PersistentVolumeClaim as a volume:
Name: web-server-pod
Image: nginx
Mount path: /usr/share/nginx/html
- Configure the new Pod to have ReadWriteMany access on the volume.
$> vi app-volume-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: app-volume
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Mi
storageClassName: app-hostpath-sc
$> kubectl apply -f app-volume-pvc.yaml
----------------------------------------------------------------
$> vi web-server-pod.yaml
apiVersion: v1
kind: Pod
metadata:
name: web-server-pod
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- mountPath: "/usr/share/nginx/html"
name: mypd
volumes:
- name: mypd
persistentVolumeClaim:
claimName: app-volume
$> kubectl apply -f web-server-pod.yaml
----------------------------------------------------------------
>> Check Resource Information
$> kubectl get pv --sort-by=metadata.name
>> Kubernetes Upgrade
$> apt update
$> apt-cache madison kubeadm | grep 1.21.3
- kubeadm upgrade
$> apt-mark unhold kubeadm && apt-get update && apt-get install -y kubeadm=1.21.3-00 && apt-mark hold kubeadm
$> apt install kubeadm=1.21.3-00 kubelet=1.21.3-00 kubectl=1.21.3-00
- kubectl upgrade
$> apt-mark unhold kubelet kubectl && apt-get update && apt-get install -y kubelet=1.21.3-00 kubectl=1.21.3-00 && apt-mark hold kubelet kubectl
$> systemctl daemon-reload
$> systemctl restart kubelet
$> kubeadm upgrade plan v1.21.3
$> kubeadm upgrade apply v1.21.3
>> network policy
- 특정 파드에서만 접근을 허용하고자 하는 경우
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: db-policy namespace: prod spec: podSelector: matchLabels: role: db policyTypes: - Ingress ingress: - from: - podSelector: matchLabels: role: api-pod ports: - protocol: TCP port: 3306 |
- 특정 네임스페이스의 특정 파드에서만 접근을 허용하고자 하는 경우
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: db-policy namespace: prod spec: podSelector: matchLabels: role: db policyTypes: - Ingress ingress: - from: - podSelector: matchLabels: role: api-pod namespaceSelector: matchLabels: name: dev ports: - protocol: TCP port: 3306 |
- 특정 네임스페이스의 모든 파드에서 접근을 허용하고자 하는 경우
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: db-policy namespace: prod spec: podSelector: matchLabels: role: db policyTypes: - Ingress ingress: - from: - namespaceSelector: matchLabels: name: dev ports: - protocol: TCP port: 3306 |
- 클러스터 외부의 특정 서버로부터 접근을 허용하고자 하는 경우
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: db-policy namespace: prod spec: podSelector: matchLabels: role: db policyTypes: - Ingress ingress: - from: - ipBlock: cidr: 192.168.1.100/32 ports: - protocol: TCP port: 3306 |
- DB에서 특정 파드로 접근을 허용하고자 하는 경우
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: db-policy namespace: prod spec: podSelector: matchLabels: role: db policyTypes: - Egress egress: - to: - podSelector: matchLabels: role: etc-pod ports: - protocol: TCP port: 8080 |
- DB에서 클러스터 외부의 특정 서버로 접근을 허용하고자 하는 경우
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: db-policy namespace: prod spec: podSelector: matchLabels: role: db policyTypes: - Egress egress: - to: - ipBlock: cidr: 192.168.1.100/32 ports: - protocol: TCP port: 8080 |
>> NetworkPolicy 목록 확인
$> kubectl get networkpolicies
- key file 및 csr 파일 생성
$> mkdir -p /data/cka/
$> openssl genrsa -out /data/cka/ckauser.key 2048
$> openssl req -new -key /data/cka/ckauser.key -out /data/cka/ckauser.csr
$> cat /data/cka/ckauser.csr | base64 | tr -d "\n"
- CertificateSigningRequest 요청을 위에서 확인한 request 항목으로 교체하여 다음과 같이 진행한다.
$> cat <<EOF | sudo kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: ckauser
spec:
request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ2lqQ0NBWElDQVFBd1JURUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlZCQWdNQ2xOdmJXVXRVM1JoZEdVeApJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MFpEQ0NBU0l3RFFZSktvWklodmNOCkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNTjVFWXpBeGZZY3JOMGlQT0dzajZPaGVBOVc1WkVvR1ZnZENnTnEKUXNDdXJNdEZWbmsyM1dOT2hzTXIvN2hWRU1IdFFNVkhrWmFaZ0JiK2R0K0wxSFpNOWkweHhIdExmRzlQK3hSdApVV2tOamh6VkZqenowZnVQZ2RlVW5CSmlpRWJQREl5LzhOaGlPbDVGa2lMejFCTHhMSVQxZWpLODJpaUZNa3RPClU2ZGFlczI1MEpZMFcyM0pkeWl6d1I3bGtKc0pMVlZNSEt0ZzBkM3Y0c2taQlp2MDAyckxrV3FJRFJBWU5WRm8KUDRZeEZEMHlxL0tRLzhncHB6ZTNkUXZWS0FYT1p4VVppM0tEZDhGMnk5RzA0MmE4R25mRHQyZmQwbnpSU2owegpQU2lMc2pQTXBXamUvMnhFbGlOY0tQbVNVNGNZbi95a3dDbzY0NnhoODhENG1tY0NBd0VBQWFBQU1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUJ0a3VNU2NYd3E4clRuNlQzeUFLcWxLdEE3c3RoWTZOSXgxNVBMbTJoWmJPdHoKMis4UzFyTDlVUUJJcUJRMlFWdVJNa2U2ZnFjOTRpNFU2dFI2SnpnK1QrMXJoSTJhSS96anBkL2Q3dEoxa01FUgpmZzBwUHd6Q2lZb056M0F6YytyWEh3bklIRmhoN0wwK0l1Ym9tdTBvcURiMzVnR0RNdHNHMnVEV2x0eEZwdGtECklaWXREVHJJT3ZqYVVRd2VGVkhpdGw5SkJGVEF0dnVIZVBCNUZtQlF3dU56VEo0VmkvRmN0dm9hRzVEUlVsRnUKK0pvNWlxSTNBYzAzNjJBT3hTeXV0L3FLdzNDTE8wZHlGbElNMzM2NEJTQnNPQTJVbkswSHhXQjZZSWRXdHhFZwpZUEdvSElxUGFDV25obzJ0ZTAxZ3c5U2ZCRkgwNFhMME80Tm00cHFFCi0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=
signerName: kubernetes.io/kube-apiserver-client
usages:
- client auth
EOF
- CertificateSigningRequest 상태 확인
$> kubectl get csr
- CertificateSigningRequest 를 승인
$> kubectl certificate approve ckauser
>> Role 생성
$ kubectl create role pod-role --verb=create,delete,watch,list,get --resource=pods
- Role 과 User 를 binding
$ kubectl create rolebinding pod-rolebinding --role=pod-role --user=ckauser
- Context 생성
$ kubectl get csr ckauser -o jsonpath='{.status.certificate}' | base64 -d > /data/cka/ckauser.crt
$ kubectl config set-credentials ckauser \
--client-key=/data/cka/ckauser.key \
--client-certificate=/data/cka/ckauser.crt \
--embed-certs=true
- 자격 증명 설정
kubectl config set-credentials 명령어는 기본적으로 사용자의 자격 증명을 설정하는데 사용됩니다. 예를 들어, 사용자 이름, 인증서, 토큰 등의 인증 정보를 설정할 수 있습니다.
$> kubectl config set-credentials ckauser --username=<USERNAME> --password=<PASSWORD>
인증서를 사용하는 경우:
$> kubectl config set-credentials ckauser --client-certificate=<PATH_TO_CERT> --client-key=<PATH_TO_KEY>
--username: 사용자의 이름을 설정합니다.
--password: 사용자의 비밀번호를 설정합니다.
--client-certificate: 사용자의 클라이언트 인증서를 설정합니다.
--client-key: 클라이언트 인증서에 대응하는 비밀 키를 설정합니다.
--token: 인증에 사용할 토큰을 설정합니다
$> kubectl config set-context ckauser --cluster=kubernetes --user=ckauser
여러 context를 사용하여 서로 다른 클러스터나 사용자 자격 증명에 대해 작업을 할 수 있습니다.
kubectl config set-context 명령어는 새로운 context를 설정하거나 기존 context를 수정합니다.
kubectl config set-context 명령어는 다음의 세 가지 주요 요소를 결합하여 context를 설정합니다:
1)클러스터 (--cluster): 연결할 Kubernetes 클러스터.
2)사용자 (--user): 인증에 사용할 사용자 자격 증명.
3)네임스페이스 (--namespace, 옵션으로 제공 가능): 기본적으로 사용될 네임스페이스. 설정하지 않으면 기본 네임스페이스인 default가 사용됩니다.
ckauser context로 스위칭
$> kubectl config set-context ckauser --cluster=kubernetes --user=ckauser
$> kubectl config get-contexts 전체 리스트
$> kubectl config use-context ckauser 스위칭
$> kubectl config view --flatten > ~/config
context 추가
$> kubectl config set-context ckauser --cluster=kubernetes --user=ckauser
>>User Cluster Role Binding
- ClusterRole 생성
$> kubectl create clusterrole app-clusterrole --verb=get,list,watch --resource=deployment,service
- ClusterRole binding
$> kubectl create clusterrolebinding app-clusterrolebinding --clusterrole=app-clusterrole --user=ckauser
>> ServiceAccount Role binding
1) Service Accounts 생성
$> kubectl create namespace apps
$> kubectl create serviceaccount pod-access
2) Pod Role 생성
$> kubectl create role pod-role --verb=get,list,watch --resource=pods -n apps
3) Rolebinding
$> kubectl create rolebinding pod-rolebinding --serviceaccount=apps:pod-access --role=pod-role -n apps
>>ServiceAccount Cluster Role binding
1) Service Account 생성
$ kubectl create serviceaccount cicd-token -n apps
2) ClusterRole 생성
$ kubectl create clusterrole deployment-clusterrole --verb=create --resource=Deployment,statefulset,daemonSet -n apps
3) ClusterRole binding
$ kubectl create clusterrolebinding deployment-clusterrole-binding --clusterrole=deployment-clusterrole --serviceaccount=apps:cicd-token -n apps
>>Kube-DNS
- pod 생성 및 expose
$ kubectl run nginx-resolver --image=nginx
$ kubectl expose pod nginx-resolver --name nginx-resolver-service --port=80 --target-port=80
- nslookup 질의
2.1 dns 질의를 위한 busybox 이미지 파드를 임시로 생성한다.
$ kubectl run busybox --image=busybox:1.28 --rm -it --restart=Never -- /bin/sh
- service에 대해서 nslookup 조회
# nslookup nginx-resolver-service.default.svc.cluster.local.
# nslookup 10-244-1-2.default.pod.cluster.local
- 파일에 내용 입력
$ cat > /tmp/nginx.svc
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-systehttp://m.svc.cluster.local
Name: nginx-resolver-service.default.svc.cluster.local.
Address 1: 10.97.185.41 nginx-resolver-service.default.svc.cluster.local
$ cat /tmp/nginx.pod
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-systehttp://m.svc.cluster.local
Name: 10-244-1-2.default.pod.cluster.local
Address 1: 10.244.1.2 10-244-1-2.default.pod.cluster.local
>>NetworkPolicy
- 라벨링 확인
$> kubectl get pod --show-labels
$> kubectl get ns nginx --show-labels
$ cat > allow-port-from-namespace.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-port-from-namespace
namespace: devops
spec:
podSelector:
matchLabels:
app: web
policyTypes:
- Ingress
ingress:
- from:
- namespaceSelector:
matchLabels:
team: migops
ports:
- protocol: TCP
port: 80
'클라우드' 카테고리의 다른 글
[K8S] kubernetes 기본 & 운영 명령어 (0) | 2024.11.27 |
---|---|
[Elasticsearch] index 의 shard(샤드) 개념 (0) | 2024.11.27 |
[k8s]Airflow Helm Install 가이드 (0) | 2024.11.26 |
베어 메탈(Bare Metal)이란.. (0) | 2023.06.15 |