AWS-EKS-06--安装 Amazon EFS CSI 驱动程序

摘要

EFS和EBS的选择?

  • 如果是同一个Pod内的多个容器之间的存储共享,您可以考虑使用EBS卷。

  • 如果是不同的Pod之间的存储共享,此时由于可能跨可用区,您可以考虑使用EFS文件系统。

安装 Amazon EFS CSI 驱动程序

  • 创建 IAM policy和角色

# 从 GitHub 下载 IAM policy 文档
$ curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/docs/iam-policy-example.json

# 创建策略
$ aws iam create-policy --profile eks-us-west-2 \
--policy-name AmazonEKS_EFS_CSI_Driver_Policy \
--policy-document file://iam-policy-example.json
{
"Policy": {
"PolicyName": "AmazonEKS_EFS_CSI_Driver_Policy",
"PolicyId": "ANPA22DP3G4GHPABMGFXG",
"Arn": "arn:aws:iam::743263909644:policy/AmazonEKS_EFS_CSI_Driver_Policy",
"Path": "/",
"DefaultVersionId": "v1",
"AttachmentCount": 0,
"PermissionsBoundaryUsageCount": 0,
"IsAttachable": true,
"CreateDate": "2023-07-03T07:35:31+00:00",
"UpdateDate": "2023-07-03T07:35:31+00:00"
}
}
# 创建 IAM 角色并向其附加此 IAM policy
# 注意这里并没有指定角色名称,所以会自动生成一个角色名称并绑定到SA上
$ eksctl create iamserviceaccount \
--cluster eks-lexing \
--profile eks-us-west-2 \
--namespace kube-system \
--name efs-csi-controller-sa \
--attach-policy-arn arn:aws:iam::743263909644:policy/AmazonEKS_EFS_CSI_Driver_Policy \
--approve
2023-07-03 15:38:07 [ℹ] 2 existing iamserviceaccount(s) (kube-system/aws-node,kube-system/ebs-csi-controller-sa) will be excluded
2023-07-03 15:38:07 [ℹ] 1 iamserviceaccount (kube-system/efs-csi-controller-sa) was included (based on the include/exclude rules)
2023-07-03 15:38:07 [!] serviceaccounts that exist in Kubernetes will be excluded, use --override-existing-serviceaccounts to override
2023-07-03 15:38:07 [ℹ] 1 task: {
2 sequential sub-tasks: {
create IAM role for serviceaccount "kube-system/efs-csi-controller-sa",
create serviceaccount "kube-system/efs-csi-controller-sa",
} }2023-07-03 15:38:07 [ℹ] building iamserviceaccount stack "eksctl-eks-lexing-addon-iamserviceaccount-kube-system-efs-csi-controller-sa"
2023-07-03 15:38:08 [ℹ] deploying stack "eksctl-eks-lexing-addon-iamserviceaccount-kube-system-efs-csi-controller-sa"
2023-07-03 15:38:08 [ℹ] waiting for CloudFormation stack "eksctl-eks-lexing-addon-iamserviceaccount-kube-system-efs-csi-controller-sa"
2023-07-03 15:38:39 [ℹ] waiting for CloudFormation stack "eksctl-eks-lexing-addon-iamserviceaccount-kube-system-efs-csi-controller-sa"
2023-07-03 15:38:40 [ℹ] created serviceaccount "kube-system/efs-csi-controller-sa"

# 查看创建后的SA,这里可以看到自动创建的角色名称
$ k describe sa efs-csi-controller-sa
Name: efs-csi-controller-sa
Namespace: kube-system
Labels: app.kubernetes.io/managed-by=eksctl
Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::743263909644:role/eksctl-eks-lexing-addon-iamserviceaccount-ku-Role1-BOQ7WRWAQDOY
Image pull secrets: <none>
Mountable secrets: <none>
Tokens: <none>
Events: <none>
  • 安装 Amazon EFS 驱动程序

# 添加 Helm 存储库。
$ helm repo add aws-efs-csi-driver https://kubernetes-sigs.github.io/aws-efs-csi-driver/
# 更新存储库
$ helm repo update aws-efs-csi-driver

# 使用 Helm Chart 安装驱动程序的版本。
# 请将存储库地址替换为集群的容器镜像地址:https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/add-ons-images.html
$ helm upgrade -i aws-efs-csi-driver aws-efs-csi-driver/aws-efs-csi-driver \
--namespace kube-system \
--set image.repository=602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-efs-csi-driver \
--set controller.serviceAccount.create=false \
--set controller.serviceAccount.name=efs-csi-controller-sa
Release "aws-efs-csi-driver" has been upgraded. Happy Helming!
NAME: aws-efs-csi-driver
LAST DEPLOYED: Mon Jul 3 16:08:38 2023
NAMESPACE: kube-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
To verify that aws-efs-csi-driver has started, run:

kubectl get pod -n kube-system -l "app.kubernetes.io/name=aws-efs-csi-driver,app.kubernetes.io/instance=aws-efs-csi-driver"

# 查看pod启动情况
$ kubectl get pod -n kube-system -l "app.kubernetes.io/name=aws-efs-csi-driver,app.kubernetes.io/instance=aws-efs-csi-driver"
NAME READY STATUS RESTARTS AGE
efs-csi-controller-5c86cf4947-77dxh 3/3 Running 0 23s
efs-csi-controller-5c86cf4947-w6s6m 3/3 Running 0 23s
efs-csi-node-22mbf 3/3 Running 0 7m41s
efs-csi-node-swc6t 3/3 Running 0 7m40s

# 如果pod启动失败,可能是镜像下载的问题,可以先删除deploy后重新执行 helm upgrade ...
$ k delete deploy -n kube-system efs-csi-controller

创建 Amazon EFS 文件系统

  • 创建安全组

# 检索您的集群所在的 VPC ID,并将其存储在变量中,以便在后续步骤中使用。
$ vpc_id=$(aws eks describe-cluster \
--name eks-lexing \
--profile eks-us-west-2 \
--query "cluster.resourcesVpcConfig.vpcId" \
--output text)
$ echo $vpc_id
vpc-088a65d5af782c20a

# 检索您的集群的 VPC 的 CIDR 范围,并将其存储在变量中,以便在后续步骤中使用
$ cidr_range=$(aws ec2 describe-vpcs \
--vpc-ids $vpc_id \
--profile eks-us-west-2 \
--query "Vpcs[].CidrBlock" \
--output text)
$ echo $cidr_range
192.168.0.0/16

# 创建一个安全组,该安全组包含一条允许您的 Amazon EFS 挂载点的入站 NFS 流量的入站规则
$ security_group_id=$(aws ec2 create-security-group \
--group-name MyEfsSecurityGroup \
--description "My EFS security group" \
--vpc-id $vpc_id \
--profile eks-us-west-2 \
--output text)
$ echo $security_group_id
sg-0d77e111b519834be

# 创建一条入站规则,该入站规则允许来自您的集群 VPC 的 CIDR 的入站 NFS 流量。
$ aws ec2 authorize-security-group-ingress \
--profile eks-us-west-2 \
--group-id $security_group_id \
--protocol tcp \
--port 2049 \
--cidr $cidr_range
{
"Return": true,
"SecurityGroupRules": [
{
"SecurityGroupRuleId": "sgr-09e1d9d2ec6d9bbe9",
"GroupId": "sg-0d77e111b519834be",
"GroupOwnerId": "743263909644",
"IsEgress": false,
"IpProtocol": "tcp",
"FromPort": 2049,
"ToPort": 2049,
"CidrIpv4": "192.168.0.0/16"
}
]
}

  • 创建EFS

# 创建文件系统
$ file_system_id=$(aws efs create-file-system \
--profile eks-us-west-2 \
--performance-mode generalPurpose \
--query 'FileSystemId' \
--output text)
$ echo $file_system_id
fs-09447193939058538

EFS挂载目标和安全组(网络配置)

  • 确定集群中节点所在的子网的 ID 以及子网所在的可用区

# 创建挂载目标
# 确定您的集群节点的 IP 地址
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-192-168-16-155.us-west-2.compute.internal Ready <none> 3d20h v1.26.4-eks-0a21954
ip-192-168-48-14.us-west-2.compute.internal Ready <none> 3d21h v1.26.4-eks-0a21954

# 确定 VPC 中子网的 ID 以及子网所在的可用区
$ aws ec2 describe-subnets \
--profile eks-us-west-2 \
--filters "Name=vpc-id,Values=$vpc_id" \
--query 'Subnets[*].{SubnetId: SubnetId,AvailabilityZone: AvailabilityZone,CidrBlock: CidrBlock}' \
--output table
----------------------------------------------------------------------
| DescribeSubnets |
+------------------+--------------------+----------------------------+
| AvailabilityZone | CidrBlock | SubnetId |
+------------------+--------------------+----------------------------+
| us-west-2b | 192.168.128.0/19 | subnet-0e3290a40a483710d |
| us-west-2a | 192.168.96.0/19 | subnet-0f6ec8eb7dafcccc0 |
| us-west-2b | 192.168.32.0/19 | subnet-035d3e96d37614fc4 |
| us-west-2d | 192.168.64.0/19 | subnet-0ca60ec494d31c2cb |
| us-west-2a | 192.168.0.0/19 | subnet-04c4cbdbfdfd8d0d5 |
| us-west-2d | 192.168.160.0/19 | subnet-026f4c6fd339f0dbd |
+------------------+--------------------+----------------------------+

# 计算node节点的网络地址
# 如下为linux下使用ipcalc命令的方式
$ ipcalc -n 192.168.16.155/19
NETWORK=192.168.0.0
$ ipcalc -n 192.168.48.14/19
NETWORK=192.168.32.0

# mac下使用ipcalc命令,安装 : brew install ipcalc
$ ipcalc -b 192.168.16.155/19 | grep Network
Network: 192.168.0.0/19

# 192-168-16-155 节点的ip地址是属于 | us-west-2a | 192.168.0.0/19 | subnet-04c4cbdbfdfd8d0d5 |
# 192-168-48-14 节点的ip地址是属于 | us-west-2b | 192.168.32.0/19 | subnet-035d3e96d37614fc4 |
  • 为节点所在的子网添加挂载目标

# 为每个 AZ 中有节点的子网运行一次挂载命令,注意替换相应的子网 ID,所以这里两个节点都要创建挂载目标
$ aws efs create-mount-target \
--profile eks-us-west-2 \
--file-system-id $file_system_id \
--subnet-id subnet-04c4cbdbfdfd8d0d5 \
--security-groups $security_group_id
{
"OwnerId": "743263909644",
"MountTargetId": "fsmt-0368ff60b5e4c39ed",
"FileSystemId": "fs-09447193939058538",
"SubnetId": "subnet-04c4cbdbfdfd8d0d5",
"LifeCycleState": "creating",
"IpAddress": "192.168.4.196",
"NetworkInterfaceId": "eni-03068e2d0265fe8e8",
"AvailabilityZoneId": "usw2-az1",
"AvailabilityZoneName": "us-west-2a",
"VpcId": "vpc-088a65d5af782c20a"
}

$ aws efs create-mount-target \
--profile eks-us-west-2 \
--file-system-id $file_system_id \
--subnet-id subnet-035d3e96d37614fc4 \
--security-groups $security_group_id
{
"OwnerId": "743263909644",
"MountTargetId": "fsmt-0612899b2439161c7",
"FileSystemId": "fs-09447193939058538",
"SubnetId": "subnet-035d3e96d37614fc4",
"LifeCycleState": "creating",
"IpAddress": "192.168.62.58",
"NetworkInterfaceId": "eni-051bfd62c82bc0b51",
"AvailabilityZoneId": "usw2-az2",
"AvailabilityZoneName": "us-west-2b",
"VpcId": "vpc-088a65d5af782c20a"
}

# 检索文件系统的挂载目标列表
$ aws efs describe-mount-targets --file-system-id $file_system_id --profile eks-us-west-2
{
"MountTargets": [
{
"OwnerId": "743263909644",
"MountTargetId": "fsmt-0368ff60b5e4c39ed",
"FileSystemId": "fs-09447193939058538",
"SubnetId": "subnet-04c4cbdbfdfd8d0d5",
"LifeCycleState": "available",
"IpAddress": "192.168.4.196",
"NetworkInterfaceId": "eni-03068e2d0265fe8e8",
"AvailabilityZoneId": "usw2-az1",
"AvailabilityZoneName": "us-west-2a",
"VpcId": "vpc-088a65d5af782c20a"
},
{
"OwnerId": "743263909644",
"MountTargetId": "fsmt-0612899b2439161c7",
"FileSystemId": "fs-09447193939058538",
"SubnetId": "subnet-035d3e96d37614fc4",
"LifeCycleState": "available",
"IpAddress": "192.168.62.58",
"NetworkInterfaceId": "eni-051bfd62c82bc0b51",
"AvailabilityZoneId": "usw2-az2",
"AvailabilityZoneName": "us-west-2b",
"VpcId": "vpc-088a65d5af782c20a"
}
]
}

删除EFS

  • 在使用AWS CLI命令删除文件系统之前,必须先删除为文件系统创建的所有挂载目标和接入点。

  • 删除现有的挂载目标

# 注意要替换 --mount-target-id fsmt-0368ff60b5e4c39ed,要删除几个挂载目标就执行几次命令
$ aws efs delete-mount-target \
--mount-target-id fsmt-0368ff60b5e4c39ed \
--profile eks-us-west-2
  • 删除efs

# 控制台也可以删除
$ aws efs delete-file-system \
--file-system-id $file_system_id \
--profile eks-us-west-2

测试

Dynamic-demo

  • 检索您的 Amazon EFS 文件系统 ID。您可以在 Amazon EFS 控制台中查找此信息,或者使用以下 AWS CLI 命令。

# 检索您的 Amazon EFS 文件系统 ID
$ aws efs describe-file-systems --profile eks-us-west-2 --query "FileSystems[*].FileSystemId" --output text
fs-09447193939058538
  • 创建storageclass

# storageclass.yaml,fileSystemId: fs-09447193939058538
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: efs-sc
provisioner: efs.csi.aws.com # 指定用于动态卷分配的 CSI 驱动程序
parameters:
  provisioningMode: efs-ap # 指定 EFS 文件系统的挂载模式为 "efs-ap"。这表示 EFS 将以 AccessPoint 的形式挂载到 Kubernetes Pod 中
  fileSystemId: fs-09447193939058538 # 指定用于动态卷分配的 EFS 文件系统的 ID
  directoryPerms: "700" # 指定新创建的目录的权限模式。这里的 "700" 表示目录的权限为 rwx------。
  gidRangeStart: "1000" # 可选参数,指定新创建的目录的 GID 范围的起始值。
  gidRangeEnd: "2000" # 可选参数,指定新创建的目录的 GID 范围的结束值。
  basePath: "/dynamic_provisioning" # 可选参数,指定新创建的目录的基本路径。这里的 "/dynamic_provisioning" 是一个示例路径,你可以根据需要设置合适的基本路径。

# provisioningMode 说明:
# 动态卷分配(Dynamic Provisioning)目前仅支持 efs-ap(Access Point 模式):每个动态创建的 PV 都会在 EFS 上创建一个唯一的访问点,并授予 Pod 对该访问点的访问权限。每个访问点都可以独立设置权限和配额,从而实现更好的隔离和控制。
# 静态预配(Static Provisioning)不使用该参数:此时通过文件系统 ID(volumeHandle)直接挂载整个 EFS 文件系统,而不是使用访问点。这种模式下的挂载是共享的,所有使用该 PV 的 Pod 都将共享相同的文件系统和权限。

# 部署
$ k apply -f storageclass.yaml
storageclass.storage.k8s.io/efs-sc created
# 查看sc
$ k get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
efs-sc efs.csi.aws.com Delete Immediate false 7s
gp2 (default) kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 5d3h
gp3 ebs.csi.aws.com Delete WaitForFirstConsumer true 5d
  • 创建pvc,指定 storageClassName: efs-sc

# pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: efs-claim
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: efs-sc
  resources:
    requests:
      storage: 5Gi
# 部署pvc
$ k apply -f pvc.yaml
  • 创建pod

# pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: efs-app
spec:
  containers:
    - name: app
      image: centos
      command: ["/bin/sh"]
      args: ["-c", "while true; do echo $(date -u) >> /data/out; sleep 5; done"]
      volumeMounts:
        - name: persistent-storage
          mountPath: /data
  volumes:
    - name: persistent-storage
      persistentVolumeClaim:
        claimName: efs-claim
# 部署pod
$ k apply -f pod.yaml
  • 查看

# 查看pv
$ k get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-edc00435-ef8e-49c6-9e3d-9697735e7108 5Gi RWX Delete Bound test/efs-claim efs-sc 62s

# 查看pvc
$ k get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
efs-claim Bound pvc-edc00435-ef8e-49c6-9e3d-9697735e7108 5Gi RWX efs-sc 72s

# 查看pod
$ k get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
efs-app 1/1 Running 0 2m41s 192.168.34.220 ip-192-168-48-14.us-west-2.compute.internal <none> <none>

# 查看输出
$ k exec -it efs-app -- bash -c "cat data/out"
Mon Jul 3 09:39:49 UTC 2023
Mon Jul 3 09:39:54 UTC 2023
Mon Jul 3 09:39:59 UTC 2023
Mon Jul 3 09:40:04 UTC 2023
Mon Jul 3 09:40:09 UTC 2023
…………………………………………………………………………

# 查看挂载
$ k exec -it efs-app -- bash -c "df -h"
Filesystem Size Used Avail Use% Mounted on
overlay 80G 4.2G 76G 6% /
tmpfs 64M 0 64M 0% /dev
tmpfs 3.8G 0 3.8G 0% /sys/fs/cgroup
127.0.0.1:/ 8.0E 0 8.0E 0% /data
/dev/nvme0n1p1 80G 4.2G 76G 6% /etc/hosts
shm 64M 0 64M 0% /dev/shm
tmpfs 6.9G 12K 6.9G 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs 3.8G 0 3.8G 0% /proc/acpi
tmpfs 3.8G 0 3.8G 0% /sys/firmware
  • 此时只删除pod并重新创建pod,则之前的磁盘数据还在。如果删除pvc并重建pvc,则原先的数据就没有了。

  • 删除测试用例

$ k delete -f pod.yaml
$ k delete -f pvc.yaml
$ k delete -f storageclass.yaml

Static-demo

# 示例项目
$ git clone https://github.com/kubernetes-sigs/aws-efs-csi-driver.git
# 这个目录下有很多示例
$ cd aws-efs-csi-driver/examples/kubernetes/
# 比如这里以 multiple_pods 为例说明,两个pod挂载同一个efs

# storageclass.yaml
# 静态预配不使用 provisioningMode 参数:PV 通过 volumeHandle 直接绑定到已有的 EFS 文件系统
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: efs-sc
provisioner: efs.csi.aws.com

# pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: efs-pv
spec:
  capacity:
    storage: 5Gi # efs会忽略,efs是没有大小限制的,但是为了符合k8s创建pv的语法规则,这里随便写一个值
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany # 支持多个pod同时读写
  persistentVolumeReclaimPolicy: Retain
  storageClassName: efs-sc
  csi:
    driver: efs.csi.aws.com
    volumeHandle: fs-09447193939058538

# claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: efs-claim
spec:
  accessModes:
    - ReadWriteMany # 支持多个pod同时读写
  storageClassName: efs-sc
  resources:
    requests:
      storage: 5Gi

# pod1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: app1
spec:
  containers:
    - name: app1
      image: busybox
      command: ["/bin/sh"]
      args: ["-c", "while true; do echo $(date -u) >> /data/out1.txt; sleep 5; done"]
      volumeMounts:
        - name: persistent-storage
          mountPath: /data
  volumes:
    - name: persistent-storage
      persistentVolumeClaim:
        claimName: efs-claim

# pod2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: app2
spec:
  containers:
    - name: app2
      image: busybox
      command: ["/bin/sh"]
      args: ["-c", "while true; do echo $(date -u) >> /data2/out2.txt; sleep 5; done"]
      volumeMounts:
        - name: persistent-storage
          mountPath: /data2
  volumes:
    - name: persistent-storage
      persistentVolumeClaim:
        claimName: efs-claim


# 编辑 specs/pv.yaml 文件并将 volumeHandle 值替换为您的 Amazon EFS 文件系统 ID
$ k apply -f specs/

# 查看pv
$ k get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
efs-pv 5Gi RWX Retain Bound test/efs-claim efs-sc 9m11s

# 查看pvc
$ k get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
efs-claim Bound efs-pv 5Gi RWX efs-sc 9m16s

# 查看pod
$ k get pod
NAME READY STATUS RESTARTS AGE
app1 1/1 Running 0 9m20s
app2 1/1 Running 0 9m19s

# 两个pod挂载的是同一个efs,所以两个pod内部会看到对方的文件
$ k exec -it app1 -- ls /data
dynamic_provisioning out1.txt out2.txt

$ k exec -it app2 -- ls /data2
dynamic_provisioning out1.txt out2.txt

# 清除测试内容
$ k delete -f specs/