Kubernetes

Some common k8s commands

# List all cluster nodes
kubectl get nodes

# Create resources in the cluster from a config file
kubectl apply -f xxx.yaml

# List the applications (pods) deployed across the whole cluster
kubectl get pods -A

# List namespaces
kubectl get namespace
# or use the short form
kubectl get ns

# List pods in the default namespace
kubectl get pods

# List pods in a specific namespace
kubectl get pods -n [namespace]

# Create a namespace
kubectl create ns [namespace]

# Delete a namespace (built-in namespaces cannot be deleted; such requests are rejected)
kubectl delete ns [namespace]

# Delete a namespace via its config file
kubectl delete -f hello.yaml

# If a pod is not running, describe it to see what happened
kubectl describe pod [podname]

# View pod logs
kubectl logs [podname]

# Show more detailed pod information (node, pod IP, etc.)
kubectl get pod -o wide

# Open a shell inside a container
kubectl exec -it [podname] -- /bin/bash

# Delete a pod
kubectl delete pod [podname]

# Delete via a file
kubectl delete -f [xx.yaml]

# Batch-delete pods by name
kubectl delete pod podname1 podname2 -n [namespace]

# Create pods via a Deployment
kubectl create deployment mytomcat --image=[image name]

# List Deployments
kubectl get deployment

# Delete a Deployment (and its pods)
kubectl delete deployment [deploymentname]

# Watch pod status
kubectl get pod -w


# Scale a deployment out: kubectl scale deployment/[appsname] --replicas=[size]
# e.g. if 3 pods were created originally, --replicas=5 adds two more on top
kubectl scale -n default deploy/my-dep --replicas=5

# Scale in
kubectl scale -n default deploy/my-dep --replicas=3

# Scale via the YAML file
kubectl edit deploy [deployname]

# View deploy info as YAML
kubectl get deploy -oyaml

# Upgrade by changing the image
# kubectl set image <resource> <container name>=<image>:<tag> --record
kubectl set image deploy/my-dep nginx=nginx:1.16.1 --record

# Rollout history
kubectl rollout history deployment/my-dep


# Show details for one revision
kubectl rollout history deployment/my-dep --revision=2

# Roll back (to the previous revision)
kubectl rollout undo deployment/my-dep

# Roll back (to a specific revision)
# --to-revision=2 rolls back to revision 2
kubectl rollout undo deployment/my-dep --to-revision=2

# List services
kubectl get service

# Expose a Deployment
kubectl expose deployment my-dep --port=8000 --target-port=80

# Remove an exposed service
kubectl delete service [servicename]

# List ingresses
kubectl get ing

# List exported NFS directories
exportfs

# List persistent volumes
kubectl get persistentvolume

# kubectl create cm [name] --from-file=[config file]
kubectl create cm redis-config --from-file=redis.conf

# List ConfigMaps
kubectl get cm

# View redis-config as YAML
kubectl get cm redis-config -oyaml

Setting up a k8s cluster

Install Docker

Prepare three machines (cloud servers or local virtual machines).

 # Remove old Docker versions
 yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine

# Prerequisites
yum install -y yum-utils

# Add the Aliyun mirror repository
yum-config-manager \
    --add-repo \
    https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
# Refresh the package cache
yum makecache fast

# Install docker-ce
yum install -y docker-ce docker-ce-cli containerd.io

# Configure the registry mirror and some settings k8s needs
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://82m9ar63.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF

# Enable on boot (optional)
systemctl enable docker
# Start now
sudo systemctl start docker
sudo systemctl start docker

Requirements

  1. A regular Linux system (e.g. CentOS, Debian, Red Hat, Ubuntu)
  2. At least 2 GB of RAM per machine
  3. At least 2 CPU cores
  4. Full network connectivity between all machines
  5. Unique hostname for every node
  6. Swap disabled; kubelet will not work properly otherwise (a quick check is sketched below)
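A minimal sanity check for these requirements, using standard Linux tools available on each machine:

# RAM: the Mem total should be at least 2048 MB
free -m
# CPU cores: should print 2 or more
nproc
# hostname: must be unique per node
hostname
# swap: should print nothing once swap is disabled
swapon --show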

Linux environment configuration

Set the hostname

 hostnamectl set-hostname k8s1

Disable SELinux

# Disable temporarily
setenforce 0
# Disable permanently
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

Disable swap

# Check swap usage
free -m
# the Swap row must be all zeros

# Disable swap now
swapoff -a

# Disable swap permanently
sed -ri 's/.*swap.*/#&/' /etc/fstab

# Allow iptables to see bridged traffic

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

Apply the settings above

sysctl --system
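To confirm the module is loaded and the settings took effect (both sysctl values should print 1):

lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables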

Install kubelet, kubeadm and kubectl

# Configure the yum repo
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
   http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

# Install
sudo yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9 --disableexcludes=kubernetes

# Enable on boot and start now
sudo systemctl enable --now kubelet

Download the images each machine needs

Create a script that pulls the images every machine needs

sudo tee ./images.sh <<-'EOF'
#!/bin/bash
images=(
kube-apiserver:v1.20.9
kube-proxy:v1.20.9
kube-controller-manager:v1.20.9
kube-scheduler:v1.20.9
coredns:1.7.0
etcd:3.4.13-0
pause:3.2
)
for imageName in ${images[@]} ; do
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/$imageName
done
EOF

Make the script executable and run it

chmod +x ./images.sh && ./images.sh

Worker nodes strictly only need the kube-proxy image.

Add the master hostname mapping on all machines

This adds an entry to the hosts file, so every access to this hostname resolves to this IP

echo "172.31.1.11  cluster-endpoint" >> /etc/hosts

Initialize the control plane. This must run on the master node, and only there.

# none of the network CIDRs may overlap
kubeadm init \
--apiserver-advertise-address=172.31.1.11 \
--control-plane-endpoint=cluster-endpoint \
--image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
--kubernetes-version v1.20.9 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=192.168.0.0/16

On success you will see

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join cluster-endpoint:6443 --token qawy7z.jna7qh5sr2lw3lvt \
    --discovery-token-ca-cert-hash sha256:482feb371039186d6c19b5b499fc9bfe1acffb51173c2ad427d827cd2b145569 \
    --control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join cluster-endpoint:6443 --token qawy7z.jna7qh5sr2lw3lvt \
    --discovery-token-ca-cert-hash sha256:482feb371039186d6c19b5b499fc9bfe1acffb51173c2ad427d827cd2b145569

Follow the instructions from the success output

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Check node status

kubectl get nodes

Install a network add-on

The output above also reminds us to deploy a network add-on. There are many options; the official URL it prints lists them all.

We use calico here.

Download the calico manifest

curl https://docs.projectcalico.org/manifests/calico.yaml -O

Apply the file with kubectl

kubectl apply -f calico.yaml

If this step errors out, the cause may be a calico version mismatch; pinning the version usually fixes it

curl https://docs.projectcalico.org/v3.20/manifests/calico.yaml -O

Join the worker nodes

kubeadm join cluster-endpoint:6443 --token qawy7z.jna7qh5sr2lw3lvt \
    --discovery-token-ca-cert-hash sha256:482feb371039186d6c19b5b499fc9bfe1acffb51173c2ad427d827cd2b145569

If a node cannot connect, check whether the firewall is still running, and disable it if so

systemctl disable firewalld --now

List all nodes

kubectl get nodes

Refreshing the token

The join token expires after 24 hours; to join more worker nodes after that, generate a new one

# generate a fresh join command
kubeadm token create --print-join-command
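To inspect the tokens that currently exist and when they expire, kubeadm also provides:

kubeadm token list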

Deploy the dashboard

The official Kubernetes web UI

# Deploy
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

Set up port access

kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

Find type: ClusterIP and change it to type: NodePort

# check the port mapping that was created
kubectl get svc -A |grep kubernetes-dashboard

Output

kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.96.3.120     <none>        8000/TCP                 5m31s
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.96.249.227   <none>        443:30514/TCP            5m31s

Based on this output, open port 30514

Access must use HTTPS

https://ip:30514 (any node's IP with this port works as well)

Create an access account

# create a yaml file
vim dash.yaml

Content

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

Apply it

kubectl apply -f dash.yaml

Get the login token

# retrieve the access token
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"

The token

eyJhbGciOiJSUzI1NiIsImtpZCI6Im5fRHIyVFY4bmFJanZ5MlpOcDB6ZlBTVlFxVHRQSkZQbUlsX3FFUVg5ZUkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXJjZHFuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI3N2RiMGQ1OC1iNzIxLTRmODEtYjczNi1mYzU0ZjA4MTlkMTQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.HVwZZtnZSK5IRZ1iFGHry4xoKS8VUYeJP9P8aaxM_HtC0Ij1JuWqrMglnCXv4zAsbm-YHpudvdfUXKSpWWK5gTj2bIZiAOZ2Kze-qkvH1MmL83nDbtLAg07LBygRqaa2O66csi1SBo3vM5meUHRz6OU0aDRvp0QNg3YCZy91sqQ--xkNoUFGvHVTT1xiaUxoVFn3Pn2UMKAg0skh8P2xNdZXJ8lAXs0-h27QIJmzafcW6HRV3OXFSCqdDPftQMaSpToPh1uoKrc_Bohi9SpPMjXztPFpAy0_DYVrxx346WXtBtbhJFymNiiCkpQ-k1EnQwOm85Ive9znFkeirmyOtw
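On clusters with kubectl v1.24 or newer, service accounts no longer get a long-lived secret by default, so the lookup above comes up empty; a short-lived token can be requested directly instead (noted for reference only, not needed on the v1.20.9 setup in this guide):

kubectl -n kubernetes-dashboard create token admin-user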

k8s core concepts

Namespaces

# List namespaces
kubectl get namespace
# or use the short form
kubectl get ns

# List pods in the default namespace
kubectl get pods

# List pods in a specific namespace
kubectl get pods -n [namespace]

# Create a namespace
kubectl create ns [namespace]

# Delete a namespace (built-in namespaces cannot be deleted; such requests are rejected)
kubectl delete ns [namespace]

Managing namespaces via YAML

# create and open the file
vim hello.yaml

Content

apiVersion: v1
kind: Namespace
metadata:
  name: hello

Create

kubectl apply -f hello.yaml

Delete

Plain delete

kubectl delete ns hello

Delete via the config file

kubectl delete -f hello.yaml

Pod

Pod: a group of running containers; the pod is the smallest unit of an application in Kubernetes.

Roughly speaking, to manage containers Kubernetes wraps them in one more layer, and that wrapper is the pod.

A single pod can run multiple containers.

Create

Create from the command line

kubectl run mynginx --image=nginx

Describe the pod to see its status and the reason for any problem

kubectl describe pod [podname]

Create from a YAML config file

# create the file
vim pod_nginx.yaml

Content

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: mynginx
  name: mynginx
spec:
  containers:
  - image: nginx
    name: mynginx

Delete a pod

 kubectl delete pod [podname]

 # or delete via the config file
 kubectl delete -f pod_nginx.yaml

View pod logs

kubectl logs [podname]

Show more detailed pod information

kubectl get pod -o wide
# k8s assigns every pod its own IP
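Because every pod has its own IP, it can be reached from any node in the cluster; a quick check, with the IP taken from the -o wide output (placeholder below):

# substitute the pod IP printed by kubectl get pod -o wide
curl http://<pod-ip>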

Open a shell inside a container

kubectl exec -it [podname] -- /bin/bash

Multi-container pods

Create and open the YAML

vim pod_nginx_tomcat.yaml

Content

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: myapp
  name: myapp
spec:
  containers:
  - image: nginx
    name: nginx
  - image: tomcat:8.5.68
    name: tomcat

Create

kubectl apply -f pod_nginx_tomcat.yaml

Containers in the same pod can reach each other via 127.0.0.1 plus the port.
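A quick way to verify this, assuming curl is available inside the nginx image: exec into the nginx container and hit tomcat on its default port 8080 (a bare tomcat image may answer 404, which still proves connectivity):

kubectl exec -it myapp -c nginx -- curl http://127.0.0.1:8080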

Deployment

Controls pods, giving them multi-replica, self-healing and scaling capabilities.

Delete the earlier pods

# see what is running
kubectl get pods

# delete several at once
kubectl delete pod myapp mynginx -n default

A pod created the plain way is gone for good once it is deleted or its machine goes down. A pod created through a Deployment self-heals: if it is deleted or crashes, k8s creates a new one.

Create a pod with a Deployment

kubectl create deployment mytomcat --image=tomcat:8.5.68

Delete pods created by a Deployment

# see which Deployments exist
 kubectl get deployment

# delete the deployment created above
kubectl delete deployment mytomcat

Multi-replica Deployments

Start several pods at once

kubectl create deployment my-dep --image=nginx --replicas=3

Delete this deployment

kubectl delete deployment my-dep

Deploy via YAML

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-dep
  template:
    metadata:
      labels:
        app: my-dep
    spec:
      containers:
      - image: nginx
        name: nginx

kubectl apply -f [xxx.yaml]

Scaling a deployment

We deployed 3 pods with the deployment, but a sudden burst of traffic can make them too few, so we consider scaling out

  • Scale out: kubectl scale
  • Scale in: kubectl scale

# scale-out command: kubectl scale deployment/[appsname] --replicas=[size]
# e.g. if 3 pods were created originally, --replicas=5 adds two more on top
kubectl scale -n default deploy/my-dep --replicas=5


# scale in: same as above, just change the --replicas value
kubectl scale -n default deploy/my-dep --replicas=3

Editing the YAML file instead

kubectl edit deploy [appname]

In the opened file, change replicas: 3 to the desired count, save and quit, and k8s scales up or down by itself

Deployment self-healing and failover

  • Self-healing: if a pod dies, k8s immediately pulls up a replacement
  • Failover: if a server goes down entirely (crash, power loss), k8s detects it and brings that machine's pods up on another machine

# watch pod status
kubectl get pod -w
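A simple way to watch self-healing in action: keep the watch running and delete a deployment-managed pod from a second terminal (the pod name below is illustrative; use one from kubectl get pods):

# in another terminal
kubectl delete pod my-dep-5b7868d854-xxxxx
# a replacement pod shows up in the watch output almost immediately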

Rolling updates

A new pod is started; once it is ready, traffic is switched over to it, and then the old version is stopped.

# view deployment info as YAML
kubectl get deploy -oyaml

This shows the container image details needed for the upgrade.

# upgrade by changing the image
# kubectl set image <resource> <container name>=<image>:<tag> --record
kubectl set image deploy/my-dep  nginx=nginx:1.16.1 --record
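The rollout can be followed until the new pods are ready:

kubectl rollout status deployment/my-dep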

Rolling back

# rollout history
kubectl rollout history deployment/my-dep


# show details for one revision
kubectl rollout history deployment/my-dep --revision=2

# roll back (to the previous revision)
kubectl rollout undo deployment/my-dep

# roll back (to a specific revision)
# --to-revision=2 rolls back to revision 2
kubectl rollout undo deployment/my-dep --to-revision=2

Other workloads

  • Deployment: stateless application deployment, e.g. microservices; provides multiple replicas and more
  • StatefulSet: stateful application deployment, e.g. redis; provides stable storage, networking, etc.
  • DaemonSet: daemon-style application deployment, e.g. log collectors; exactly one copy runs on every machine
  • Job/CronJob: scheduled task deployment, e.g. cleanup components that run at a specified time

Service

Service discovery and load balancing for pods.

An abstract way to expose a group of Pods as a network service.

ClusterIP

A ClusterIP service is reachable only from inside the cluster

# list services
kubectl get service

# expose a Deployment
# exposes a group of pods on port 8000; their own port is 80
# reachable via the service IP plus the port
# also reachable via service-name.namespace.svc:
#     my-dep.default.svc:8000
# the DNS form only works from inside containers, not from the host machines
kubectl expose deployment my-dep --port=8000 --target-port=80
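To verify the DNS form, exec into any pod and curl the service name (assuming curl exists in the image; the pod name below is illustrative):

kubectl exec -it my-dep-5b7868d854-xxxxx -- curl http://my-dep.default.svc:8000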

Delete

# remove the exposed service
# kubectl delete service [servicename]
kubectl delete service my-dep

Exposing a service via YAML

apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  type: ClusterIP
  selector:
    # select pods with the label app=my-dep
    app: my-dep
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80

The expose command above is equivalent to

kubectl expose deployment my-dep --port=8000 --target-port=80 --type=ClusterIP

NodePort

NodePort mode makes the service reachable from outside the cluster, too

kubectl expose deployment my-dep --port=8000 --target-port=80 --type=NodePort

Service info exposed by NodePort

my-dep       NodePort    10.96.82.168   <none>        8000:32555/TCP   4m26s

The NodePort is picked at random from the 30000-32767 range

Inside the cluster the service is reachable at 10.96.82.168:8000

From outside, any node's ip:32555 works
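The assigned NodePort can also be read directly with jsonpath instead of scanning the table:

kubectl get svc my-dep -o jsonpath='{.spec.ports[0].nodePort}'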

Ingress

Installing Ingress

Official site

Ingress: the unified gateway entry point in front of Services

A service is a logical grouping of pods, the abstract external entry point for the pods' service.

An ingress is the unified gateway entry point for the service layer.

k8s does not ship with an Ingress controller; it has to be installed separately.

Download the installation YAML

wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.47.0/deploy/static/provider/baremetal/deploy.yaml

Since deploy.yaml lives on GitHub and may be hard to download, you can also create it yourself; the content follows

apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx

---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
rules:
  - apiGroups:
      - ''
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ''
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io   # k8s 1.14+
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - extensions
      - networking.k8s.io   # k8s 1.14+
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io   # k8s 1.14+
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ''
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ''
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io   # k8s 1.14+
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io   # k8s 1.14+
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io   # k8s 1.14+
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ''
    resources:
      - configmaps
    resourceNames:
      - ingress-controller-leader-nginx
    verbs:
      - get
      - update
  - apiGroups:
      - ''
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ''
    resources:
      - events
    verbs:
      - create
      - patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  type: ClusterIP
  ports:
    - name: https-webhook
      port: 443
      targetPort: webhook
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  revisionHistoryLimit: 10
  minReadySeconds: 0
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/ingress-nginx-controller:v0.46.0
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --election-id=ingress-controller-leader
            - --ingress-class=nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
            - --validating-webhook=:8443
            - --validating-webhook-certificate=/usr/local/certificates/cert
            - --validating-webhook-key=/usr/local/certificates/key
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
            - name: webhook
              containerPort: 8443
              protocol: TCP
          volumeMounts:
            - name: webhook-cert
              mountPath: /usr/local/certificates/
              readOnly: true
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
        - name: webhook-cert
          secret:
            secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  name: ingress-nginx-admission
webhooks:
  - name: validate.nginx.ingress.kubernetes.io
    matchPolicy: Equivalent
    rules:
      - apiGroups:
          - networking.k8s.io
        apiVersions:
          - v1beta1
        operations:
          - CREATE
          - UPDATE
        resources:
          - ingresses
    failurePolicy: Fail
    sideEffects: None
    admissionReviewVersions:
      - v1
      - v1beta1
    clientConfig:
      service:
        namespace: ingress-nginx
        name: ingress-nginx-controller-admission
        path: /networking/v1beta1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
rules:
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - validatingwebhookconfigurations
    verbs:
      - get
      - update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ''
    resources:
      - secrets
    verbs:
      - get
      - create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-3.33.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.47.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: create
          image: docker.io/jettech/kube-webhook-certgen:v1.5.1
          imagePullPolicy: IfNotPresent
          args:
            - create
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
            - --namespace=$(POD_NAMESPACE)
            - --secret-name=ingress-nginx-admission
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  annotations:
    helm.sh/hook: post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-3.33.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 0.47.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
  namespace: ingress-nginx
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-3.33.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.47.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          image: docker.io/jettech/kube-webhook-certgen:v1.5.1
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=$(POD_NAMESPACE)
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            - --patch-failure-policy=Fail
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000

If you downloaded the yaml, you may want to change its image (the registry source)

spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: k8s.gcr.io/ingress-nginx/controller:v0.46.0@sha256:52f0058bed0a17ab0fb35628ba97e8d52b5d32299fbc03cc0f6c7b9ff036b61a

Replace it with this mirrored image

image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/ingress-nginx-controller:v0.46.0

Install

kubectl apply -f deploy.yaml

Installing Ingress creates a service: ingress-nginx

Check

kubectl get svc -A
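Optionally, wait until the controller pod is actually ready before creating Ingress rules:

kubectl wait --namespace ingress-nginx \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/component=controller \
  --timeout=120s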

Any machine's ip:port now responds, but returns 404 because no rules exist yet

Domain-based access with Ingress

Set up a test environment

Prepare the YAML config

vim test_Ingress.yaml

Content

apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
        ports:
        - containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - image: nginx
        name: nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hello-server
  name: hello-server
spec:
  selector:
    app: hello-server
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 9000

Domain access

Create the YAML

vim ingress-host-bar.yaml

YAML config

This means: every request under hello.ingress.com/ is forwarded to the hello-server service,

and every request under demo.ingress.com/ is forwarded to the nginx-demo service.

apiVersion: networking.k8s.io/v1
kind: Ingress  
metadata:
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.ingress.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.ingress.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"  
        backend:
          service:
            name: nginx-demo  
            port:
              number: 8000

After creating it, check

kubectl get ing

Because this is a test environment and the domains are not real, configure hosts on the machine you access from

Add the k8s master address mapping to hosts

# k8s ingress mapping
172.31.1.11 hello.ingress.com
172.31.1.11 demo.ingress.com
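Alternatively, curl can fake the Host header directly against the master IP, with no hosts entry (30714 here stands for whatever HTTP NodePort kubectl get svc -A reports):

curl -H "Host: hello.ingress.com" http://172.31.1.11:30714/
curl -H "Host: demo.ingress.com" http://172.31.1.11:30714/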

The current state:

test_Ingress.yaml deployed two Deployments:

hello-server: two replicas, exposing port 9000

nginx-demo: also two replicas, exposing port 80

The same file also started two services:

hello-server: fronts the two hello-server replicas, mapping port 9000 to 8000

nginx-demo: fronts the two nginx-demo replicas, mapping port 80 to 8000

ingress-host-bar.yaml started one Ingress.

Look up the ingress controller's own service

kubectl get svc -A

hello.ingress.com:30714 reaches the two replicas behind hello-server

demo.ingress.com:30714 reaches the two replicas behind nginx-demo

Modify an Ingress

kubectl edit ing [ingName]

Path rewriting

Add an annotation to the config

annotations:
  nginx.ingress.kubernetes.io/rewrite-target: /$2

The annotation marks which part gets rewritten; appending the capture groups (/|$)(.*) to the path that should be rewritten is all it takes

Official example: Rewrite - NGINX Ingress Controller (kubernetes.github.io)

apiVersion: networking.k8s.io/v1
kind: Ingress  
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.atguigu.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.atguigu.com"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx(/|$)(.*)"  # 把请求会转给下面的服务,下面的服务一定要能处理这个路径,不能处理就是404
        backend:
          service:
            name: nginx-demo  ## e.g. a java service: path rewriting strips the nginx prefix
            port:
              number: 8000

Test:

Change ingress-host-bar.yaml to the content above

Re-apply the Ingress

A request to demo.ingress.com:30714/nginx is automatically rewritten with the nginx prefix stripped
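A sketch of how to verify the rewrite with curl (same Host-header trick as before; the port stands for your HTTP NodePort):

# the /nginx prefix is stripped, so nginx-demo receives /index.html
curl -H "Host: demo.ingress.com" http://172.31.1.11:30714/nginx/index.html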

Rate limiting

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-limit-rate
  annotations:
    nginx.ingress.kubernetes.io/limit-rps: "1"
spec:
  ingressClassName: nginx
  rules:
  - host: "haha.atguigu.com"
    http:
      paths:
      - pathType: Exact
        path: "/"
        backend:
          service:
            name: nginx-demo
            port:
              number: 8000
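limit-rps: "1" allows roughly one request per second; requests beyond that rate (and its small burst allowance) are rejected with HTTP 503. A quick check (the port again stands for your HTTP NodePort):

for i in $(seq 1 10); do
  curl -s -o /dev/null -w "%{http_code}\n" -H "Host: haha.atguigu.com" http://172.31.1.11:30714/
done
# the first requests return 200, the excess ones 503 once the limit kicks in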

Storage abstraction

Containers such as redis and mysql store data. If one of those containers dies, k8s detects it and restarts the image on another node, but that machine does not have the data, so k8s provides a storage abstraction.

nfs

The approach here is nfs:

unify the data layer and replicate it to the other nodes.

Environment setup

Install the nfs packages on all nodes

yum install -y nfs-utils

Master node

# nfs server: expose this directory on the master; everything may sync it in insecure mode
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports

# create the directory
mkdir -p /nfs/data
# enable rpc remote binding, the service behind directory syncing
systemctl enable rpcbind --now
# enable the nfs service
systemctl enable nfs-server --now
# apply the config
exportfs -r


# list the exported directories
exportfs

Worker nodes

# list the directories available for syncing; use your own master node's IP
showmount -e 172.31.1.11

# mount the nfs server's shared directory onto a local path
# create the directory locally
mkdir -p /nfs/data
# mount
mount -t nfs 172.31.1.11:/nfs/data /nfs/data
# write a test file
echo "hello nfs server" > /nfs/data/test.txt

Running cat /nfs/data/test.txt on any node prints hello nfs server

Data written on any node is synced to all of them

Native volume mounting

For this to work, first create the directory on the host

mkdir /nfs/data/nginx-pv

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-pv-demo
  name: nginx-pv-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pv-demo
  template:
    metadata:
      labels:
        app: nginx-pv-demo
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        # the container directory to mount; the name is arbitrary
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          nfs:
              # the exported directory on the nfs host
            server: 172.31.1.11
            path: /nfs/data/nginx-pv
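To check the mount works, write a page into the exported directory on the nfs server, then fetch it from one of the pods (pod IP from -o wide; placeholder below):

echo "hello from nfs" > /nfs/data/nginx-pv/index.html
kubectl get pod -o wide
curl http://<pod-ip>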

pv & pvc

原生挂载存在的问题

  • pod删除后挂载在本地的卷是否会删除 —> pod释放删除
  • 如果nfs只有50g,每个pod没有限制的使用空间 —> pod容量上的限制

pv: 持久卷(Persistent Volume),将应用需要持久化的数据保存到指定位置

pvc:持久卷申明(Persistent Volume Claim),申明需要使用的持久卷规格

创建pv

创建三个文件夹

#nfs主节点
mkdir -p /nfs/data/01
mkdir -p /nfs/data/02
mkdir -p /nfs/data/03

YAML

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01-10m
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/01
    server: 172.31.1.11
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02-1gi
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/02
    server: 172.31.1.11
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03-3gi
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/03
    server: 172.31.1.11

List PVs

kubectl get persistentvolume

Bind a PVC

Request 200Mi for nginx-pvc and it is automatically bound to the best-fitting PV

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
  storageClassName: nfs

The claim above binds /nfs/data/02: 200Mi was requested, so 10M is too small and 3Gi is oversized; the 1Gi volume fits best.
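kubectl get pv shows which volume was chosen; the output below is illustrative:

kubectl get pv
# NAME       CAPACITY   ACCESS MODES   STATUS      CLAIM
# pv01-10m   10M        RWX            Available
# pv02-1gi   1Gi        RWX            Bound       default/nginx-pvc
# pv03-3gi   3Gi        RWX            Available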

The binding status is visible there

Deleting the claim releases the binding

kubectl delete -f pvc.yaml

List PVCs

kubectl get pvc

pod + pvc

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy-pvc
  name: nginx-deploy-pvc
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-deploy-pvc
  template:
    metadata:
      labels:
        app: nginx-deploy-pvc
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          # bind the pvc; claimName must match the PVC's metadata.name
          persistentVolumeClaim:
            claimName: nginx-pvc

ConfigMap

Every container may have config files, and ConfigMap handles mounting them.

Suppose we have a redis.conf (the redis configuration file)

Turn redis.conf into a configmap

# kubectl create cm [name] --from-file=[config file]
kubectl create cm redis-config --from-file=redis.conf

# list ConfigMaps
kubectl get cm

# view redis-config as YAML
kubectl get cm redis-config -oyaml

Reference the ConfigMap when creating a pod

apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis
    command:
      - redis-server
      - "/redis-master/redis.conf"  #指的是redis容器内部的位置
    ports:
    - containerPort: 6379
    volumeMounts:
    - mountPath: /data
      name: data
    - mountPath: /redis-master
      name: config
  volumes:
    - name: data
      emptyDir: {}
    - name: config
      configMap:
        name: redis-config
        items:
        - key: redis.conf
          path: redis.conf
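To confirm the config actually landed inside the container (assuming the pod is named redis as above):

kubectl exec -it redis -- cat /redis-master/redis.conf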

Secret

Pulling private images from DockerHub requires credentials; Secret solves this for us

## command format
kubectl create secret docker-registry regcred \
  --docker-server=<your registry server> \
  --docker-username=<your username> \
  --docker-password=<your password> \
  --docker-email=<your email address>

Check

kubectl get secret 

# view the full structure
kubectl get secret [secretName] -oyaml

Use the secret

apiVersion: v1
kind: Pod
metadata:
  name: private-nginx
spec:
  containers:
  - name: private-nginx
    image: leifengyang/guignginx:v1.0
  # pull this image with these credentials
  imagePullSecrets:
  - name: leifengyang-docker