yaml example
volumes hostPath
# Pod mounting a hostPath volume (node-local directory) into nginx.
# Fix: annotation key typo "descritions" -> "descriptions".
---
apiVersion: v1
kind: Pod
metadata:
  name: volumes-hostpath
  namespace: default
  labels:
    app: test-hostpath
  annotations:
    descriptions: "this is test for volumes.hostPath"
spec:
  containers:
    - name: nginx
      image: nginx
      imagePullPolicy: IfNotPresent
      ports:
        - name: http
          containerPort: 80
      volumeMounts:
        - name: code
          mountPath: /usr/share/nginx/html/
          readOnly: false
  volumes:
    - name: code
      hostPath:
        # type: Directory requires /root/code/ to already exist on the node
        path: /root/code/
        type: Directory
volumes nfs
# Pod mounting an NFS export directly (no PV/PVC indirection).
# Fix: annotation key typo "descritions" -> "descriptions".
---
apiVersion: v1
kind: Pod
metadata:
  name: volumes-nfs
  namespace: default
  labels:
    app: nfs
  annotations:
    descriptions: "This is test for volumes.nfs"
spec:
  containers:
    - name: nfs
      image: nginx
      imagePullPolicy: IfNotPresent
      ports:
        - name: http
          containerPort: 80
      volumeMounts:
        - name: code
          mountPath: /usr/share/nginx/html/
  volumes:
    - name: code
      nfs:
        path: /data
        server: nfs01.local
pv pvc
1、创建nfs 并导出
创建目录:
mkdir -p /data/vol{1,2,3,4,5}
写配置:
vim /etc/exports /data/vol1 192.168.1.0/24(rw,no_root_squash) /data/vol2 192.168.1.0/24(rw,no_root_squash) /data/vol3 192.168.1.0/24(rw,no_root_squash) /data/vol4 192.168.1.0/24(rw,no_root_squash) /data/vol5 192.168.1.0/24(rw,no_root_squash)
导出卷:
exportfs -arv
查看是否成功导出:
showmount -e
2、创建pv
编写yaml文件
# Five NFS-backed PersistentVolumes with differing access modes / sizes.
# Fix: PersistentVolume is a cluster-scoped resource — the "namespace:
# default" fields in the original metadata are ignored by the API server
# and have been removed.
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
  labels:
    name: pv1
spec:
  nfs:
    path: /data/vol1
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
  labels:
    name: pv2
spec:
  nfs:
    path: /data/vol2
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadOnlyMany"]
  capacity:
    storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
  labels:
    name: pv3
spec:
  nfs:
    path: /data/vol3
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"]
  capacity:
    storage: 3Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv4
  labels:
    name: pv4
spec:
  nfs:
    path: /data/vol4
    server: nfs01
  accessModes: ["ReadWriteMany", "ReadOnlyMany"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv5
  labels:
    name: pv5
spec:
  nfs:
    path: /data/vol5
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadWriteMany"]
  capacity:
    storage: 4Gi
创建pv
kubectl apply -f volumes-pv.yaml
查看pv
kubectl get pv
pv中的数据默认是保留的 Retain ,也可以有其他设置。
3、创建pvc
编写yaml文件
# PVC requesting a ReadWriteMany volume of at least 2Gi; the control
# plane will bind it to a matching PV (e.g. pv2/pv4 above).
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc
  namespace: default
  labels:
    name: pvc
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 2Gi
创建pvc
kubectl apply -f volumes-pvc.yaml
查看pvc
kubectl get pvc
4、创建Pod 并使用pvc
# Pod consuming the PVC above via persistentVolumeClaim.
# NOTE(review): the pod/container are named "busybox" but the image is
# nginx — presumably leftover naming; verify intent.
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
    - name: busybox-pvc
      image: nginx
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - name: code
          mountPath: /usr/share/nginx/html/
  volumes:
    - name: code
      persistentVolumeClaim:
        claimName: mypvc
创建pod
kubectl apply -f volumes-pod-pvc.yaml
这时查看pv就可以看到对应的pv已经处于Bound状态了
configMap
cm.yaml
# ConfigMap carrying an nginx site config (site.conf) as a literal
# block scalar, intended to be mounted under /etc/nginx/conf.d/.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-www2
  namespace: default
data:
  site.conf: |
    server {
        listen 80;
        index index.php index.html;
        server_name localhost;
        error_log /var/log/nginx/error.log;
        access_log /var/log/nginx/access.log;
        root /code;
        location ~ \.php$ {
            try_files $uri =404;
            fastcgi_split_path_info ^(.+\.php)(/.+)$;
            fastcgi_pass php:9000;
            fastcgi_index index.php;
            include fastcgi_params;
            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
            fastcgi_param PATH_INFO $fastcgi_path_info;
        }
    }
configmap-pod.yaml
# Pod mounting the ConfigMap as a read-only volume.
# Fix: the original referenced "configMap: name: nginx-www", but the
# ConfigMap created by cm.yaml is "nginx-www2" — the volume could never
# resolve. Reference corrected to nginx-www2.
---
apiVersion: v1
kind: Pod
metadata:
  name: config-map
  namespace: default
  labels:
    app: config-map-test-busybox
  annotations:
    descriptions: "test configmap"
spec:
  containers:
    - name: busybox
      image: busybox:latest
      imagePullPolicy: IfNotPresent
      # keep the container alive so the mounted file can be inspected
      command: ["/bin/sh","-c","sleep 730000"]
      ports:
        - name: http
          containerPort: 80
      volumeMounts:
        - name: siteconfig
          mountPath: /etc/nginx/conf.d/
          readOnly: true
  volumes:
    - name: siteconfig
      configMap:
        name: nginx-www2
kubectl apply -f cm.yaml
kubectl apply -f configmap-pod.yaml
liveness-httpget
# Pod demonstrating an httpGet liveness probe against /index.html.
# Fix: the original set restartPolicy: Never, which defeats the demo —
# when the liveness probe fails the kubelet kills the container, and
# with Never it is not restarted, so the "restart on probe failure"
# behaviour can never be observed. Changed to Always (the default).
---
apiVersion: v1
kind: Pod
metadata:
  name: liveness-httpget
  labels:
    app: liveness
    myapp: test-app
  annotations:
    description: "This is liveness Test"
spec:
  containers:
    - name: nginx
      image: nginx
      imagePullPolicy: IfNotPresent
      ports:
        - name: http
          containerPort: 80
      volumeMounts:
        - name: code
          mountPath: "/usr/share/nginx/html/"
          readOnly: false
      livenessProbe:
        httpGet:
          path: "/index.html"
          port: 80
          scheme: "HTTP"
        initialDelaySeconds: 10
        failureThreshold: 2
        timeoutSeconds: 1
        periodSeconds: 5
      resources:
        limits:
          cpu: "300m"
          memory: "300Mi"
        requests:
          cpu: "200m"
          memory: "200Mi"
  restartPolicy: Always
  volumes:
    - name: code
      persistentVolumeClaim:
        claimName: mypvc
rediness
rediness.yaml
# Pod demonstrating an httpGet readiness probe.
# NOTE(review): the probe path "/index.html1" does not exist in the
# nginx image — presumably intentional, so the pod stays NotReady for
# the demo; confirm before "fixing".
---
apiVersion: v1
kind: Pod
metadata:
  name: rediness-httpget-pod
  namespace: default
spec:
  containers:
    - name: rediness-demo
      image: nginx
      imagePullPolicy: IfNotPresent
      ports:
        - name: http
          containerPort: 80
      readinessProbe:
        httpGet:
          port: http
          path: /index.html1
        initialDelaySeconds: 1
        periodSeconds: 3
lifecycle
lifecycle.yaml
# Pod demonstrating a postStart lifecycle hook. The two "command" keys
# in the flattened original live at different levels: one under
# lifecycle.postStart.exec (the hook), one on the container itself
# (keeps busybox alive) — they are not duplicate keys.
---
apiVersion: v1
kind: Pod
metadata:
  name: poststart-pod
  namespace: default
spec:
  containers:
    - name: poststart
      image: busybox
      imagePullPolicy: IfNotPresent
      lifecycle:
        postStart:
          exec:
            command: ["mkdir","-p","/data/html/"]
      command: ["/bin/sh","-c","sleep 3600"]
replicaset
# ReplicaSet keeping 2 busybox pods alive; selector must match the
# template labels (app=myapp, release=canary).
---
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: myapp
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      name: myapp-pod
      labels:
        app: myapp
        release: canary
    spec:
      containers:
        - name: myapp-containers
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["/bin/sh","-c","sleep 999999"]
          ports:
            - name: http
              containerPort: 80
deployment
deployment.yaml
# Deployment managing 3 busybox replicas via a generated ReplicaSet.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-dm
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
        - name: myapp
          image: busybox
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
          command: ["/bin/sh","-c","sleep 3600"]
查看deploy:kubectl get deploy
查看ReplicaSet:kubectl get rs
查看滚动历史:kubectl rollout history deployment myapp-dm
可以直接修改yaml文件,并使用apply 重复执行yaml文件。
daemonset
daemonset.yaml
# DaemonSet: one nginx pod per (schedulable) node; no replicas field.
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemon-set
  namespace: default
spec:
  selector:
    matchLabels:
      app: app1
      release: stable
  template:
    metadata:
      labels:
        app: app1
        release: stable
    spec:
      containers:
        - name: app1
          image: nginx
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
DaemonSet一个节点运行一个Pod。不需要replicas 字段了。
service
service.yaml
# ClusterIP Service selecting app=nginx-web pods.
# clusterIP: "" means "auto-assign" (same as omitting the field).
---
apiVersion: v1
kind: Service
metadata:
  name: service-demo
  namespace: default
spec:
  selector:
    app: nginx-web
  type: ClusterIP
  clusterIP: ""
  ports:
    - name: http
      port: 80
      targetPort: 80
deployment-nginx.yaml
# Backend Deployment for service-demo (labels match its selector).
# NOTE(review): "nginx:v1" is not an official Docker Hub tag —
# presumably a private-registry tag; confirm it is pullable.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-web-deploy
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-web
  template:
    metadata:
      labels:
        app: nginx-web
    spec:
      containers:
        - name: nginx-web
          image: nginx:v1
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
headless-service
# Headless Service: clusterIP None, so DNS resolves the service name
# directly to the individual pod IPs instead of a virtual IP.
---
apiVersion: v1
kind: Service
metadata:
  name: service-demo
  namespace: default
spec:
  selector:
    app: nginx-web
  type: ClusterIP
  clusterIP: None
  ports:
    - name: http
      port: 80
      targetPort: 80
dig -t A service-demo.default.svc.cluster.local @10.96.0.10
使用上面的命令可以 dns解析出一列 Pod的访问IP
StatefulSet
先创建pv
volumes-pv.yaml
# Five 2Gi NFS-backed PersistentVolumes for the StatefulSet's
# volumeClaimTemplates (each replica binds its own PV).
# Fix: PersistentVolume is cluster-scoped — the "namespace: default"
# fields in the original metadata are ignored and have been removed.
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
  labels:
    name: pv1
spec:
  nfs:
    path: /data/vol1
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
  labels:
    name: pv2
spec:
  nfs:
    path: /data/vol2
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadOnlyMany"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
  labels:
    name: pv3
spec:
  nfs:
    path: /data/vol3
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv4
  labels:
    name: pv4
spec:
  nfs:
    path: /data/vol4
    server: nfs01
  accessModes: ["ReadWriteMany", "ReadOnlyMany"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv5
  labels:
    name: pv5
spec:
  nfs:
    path: /data/vol5
    server: nfs01
  accessModes: ["ReadWriteOnce", "ReadWriteMany"]
  capacity:
    storage: 2Gi
statfulset.yaml
# Headless governing Service + StatefulSet. Each replica gets a stable
# name (myapp-0, myapp-1) and its own PVC from volumeClaimTemplates.
---
apiVersion: v1
kind: Service
metadata:
  name: myapp-svc
  labels:
    app: myapp-svc
spec:
  selector:
    app: myapp
  ports:
    - port: 80
      name: http
  clusterIP: None
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: myapp
spec:
  serviceName: myapp-svc
  replicas: 2
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
        - name: myapp
          image: nginx:v1
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
          volumeMounts:
            - name: code
              mountPath: /usr/share/nginx/html/
  volumeClaimTemplates:
    - metadata:
        name: code
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 2Gi
扩缩容:
kubectl scale sts myapp --replicas=2
滚动更新,序号小于 2 的 Pod 不更新(partition=2)
kubectl patch sts myapp -p '{"spec":{"updateStrategy":{"rollingUpdate":{"partition":2}}}}'
更新镜像
kubectl set image sts/myapp myapp=nginx:v2