diff --git a/.gitignore b/.gitignore
index 44296975..64210fce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,6 @@ builds/
*.exe
vendor/
bin/
-csi-unity
semver.mk
go.sum
/csi-unity
diff --git a/CSI Driver for Dell EMC Unity Product Guide.pdf b/CSI Driver for Dell EMC Unity Product Guide.pdf
index be46facf..3b01261f 100644
Binary files a/CSI Driver for Dell EMC Unity Product Guide.pdf and b/CSI Driver for Dell EMC Unity Product Guide.pdf differ
diff --git a/CSI Driver for Dell EMC Unity Release Notes.pdf b/CSI Driver for Dell EMC Unity Release Notes.pdf
index 4133037b..1df0b532 100644
Binary files a/CSI Driver for Dell EMC Unity Release Notes.pdf and b/CSI Driver for Dell EMC Unity Release Notes.pdf differ
diff --git a/Dockerfile b/Dockerfile
index 9f393cde..82a01f5c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,20 +1,48 @@
-# Dockerfile to build PowerStore CSI Driver
-FROM centos:7.6.1810
+# Stage to build the driver
+FROM golang:1.13 as builder
+RUN mkdir -p /go/src
+COPY csi-unity/ /go/src/csi-unity
+WORKDIR /go/src/csi-unity
+RUN mkdir -p bin
+RUN go generate
+RUN GOOS=linux CGO_ENABLED=0 GOARCH=amd64 go build -ldflags '-extldflags "-static"' -o bin/csi-unity
+# Print the version
+RUN go run core/semver/semver.go -f mk
+
+# Dockerfile to build Unity CSI Driver
+FROM registry.access.redhat.com/ubi7/ubi-minimal:7.8-328 as driver
# Install dependencies, followed by cleaning the cache
-RUN yum install -y e2fsprogs xfsprogs which nfs-utils device-mapper-multipath \
+RUN microdnf install -y --enablerepo=rhel-7-server-rpms e2fsprogs xfsprogs nfs-utils device-mapper-multipath \
&& \
- yum clean all \
+ microdnf clean all \
&& \
rm -rf /var/cache/run
+COPY --from=builder /go/src/csi-unity/bin/csi-unity /
+COPY csi-unity/scripts/run.sh /
+RUN chmod 777 /run.sh
+ENTRYPOINT ["/run.sh"]
-# validate some cli utilities are found
-RUN which mkfs.ext4
-RUN which mkfs.xfs
+# Stage to check for critical and high CVE issues via Trivy (https://github.com/aquasecurity/trivy)
+# will break image build if CRITICAL issues found
+# will print out all HIGH issues found
+FROM driver as trivy-ubi7m
+RUN microdnf install -y tar
-COPY "bin/csi-unity" /
-COPY "scripts/run.sh" /
+FROM trivy-ubi7m as trivy
+RUN curl https://raw.githubusercontent.com/aquasecurity/trivy/master/contrib/install.sh | sh
+RUN trivy fs -s CRITICAL --exit-code 1 / && \
+ trivy fs -s HIGH / && \
+ trivy image --reset && \
+ rm ./bin/trivy
-RUN chmod 777 /run.sh
+# final stage
+FROM driver as final
-ENTRYPOINT ["/run.sh"]
+LABEL vendor="Dell Inc." \
+ name="csi-unity" \
+ summary="CSI Driver for Dell EMC Unity" \
+ description="CSI Driver for provisioning persistent storage from Dell EMC Unity" \
+ version="1.3.0" \
+ license="Apache-2.0"
+COPY csi-unity/licenses /licenses
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 9a5e67a0..32913a48 100644
--- a/Makefile
+++ b/Makefile
@@ -30,18 +30,16 @@ unit-test:
# Docker-related tasks
#
# Generates the docker container (but does not push)
-docker-build: go-build
- cd core && go generate
- go run core/semver/semver.go -f mk >semver.mk
- make -f docker.mk docker-build
+podman-build: go-build
+ sh build.sh
-docker-push:
- make -f docker.mk docker-push
+podman-push: go-build
+ sh build.sh -p
version:
go generate
go run core/semver/semver.go -f mk >semver.mk
- make -f docker.mk version
+ sh build.sh -h
.PHONY: clean
clean:
diff --git a/README.md b/README.md
index e819f2c7..e8bb31de 100644
--- a/README.md
+++ b/README.md
@@ -1,48 +1,60 @@
# Unity CSI
-This repo contains [Container Storage Interface(CSI)]
-() Unity CSI driver for DellEMC.
+[![Go Report Card](https://goreportcard.com/badge/github.com/dell/csi-unity)](https://goreportcard.com/report/github.com/dell/csi-unity)
+[![License](https://img.shields.io/github/license/dell/csi-unity)](https://github.com/dell/csi-unity/blob/master/LICENSE)
+[![Docker](https://img.shields.io/docker/pulls/dellemc/csi-unity.svg?logo=docker)](https://hub.docker.com/r/dellemc/csi-unity)
+[![Last Release](https://img.shields.io/github/v/release/dell/csi-unity?label=latest&style=flat-square)](https://github.com/dell/csi-unity/releases)
+
+This repo contains the [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec) driver for Dell EMC Unity.
## Overview
-Unity CSI plugins implement an interface between CSI enabled Container Orchestrator(CO) and Unity Storage Array. It allows dynamically provisioning Unity volumes and attaching them to workloads.
+Unity CSI plugins implement an interface between a CSI-enabled Container Orchestrator (CO) and the Unity Storage Array. They allow static and dynamic provisioning of Unity volumes and attaching them to workloads.
+
+## Support
+
+The CSI Driver for Dell EMC Unity image, which is the built driver code, is available on Dockerhub and is officially supported by Dell EMC.
+The source code for CSI Driver for Dell EMC Unity available on Github is unsupported and provided solely under the terms of the license attached to the source code. For clarity, Dell EMC does not provide support for any source code modifications.
+For any CSI driver issues, questions or feedback, join the [Dell EMC Container community](https://www.dell.com/community/Containers/bd-p/Containers).
## Introduction
The CSI Driver For Dell EMC Unity conforms to CSI spec 1.1
- * Support for Kubernetes 1.14 and 1.16
+ * Support for Kubernetes v1.17, v1.18 and v1.19
* Will add support for other orchestrators over time
- * The CSI specification is documented here: https://github.com/container-storage-interface/spec. The driver uses CSI v1.1.
+ * The CSI specification is documented here: https://github.com/container-storage-interface/spec/tree/release-1.1. The driver uses CSI v1.1.
## CSI Driver For Dell EMC Unity Capabilities
| Capability | Supported | Not supported |
|------------|-----------| --------------|
-|Provisioning | Persistent volumes creation, deletion, mounting, unmounting, listing | Volume expand |
+|Provisioning | Persistent volumes creation, deletion, mounting, unmounting, expansion | |
|Export, Mount | Mount volume as file system | Raw volumes, Topology|
-|Data protection | Creation of snapshots, Create volume from snapshots(FC/iSCSI) | Cloning volume, Create volume from snapshots(NFS) |
+|Data protection | Creation of snapshots, Create volume from snapshots, Volume Cloning | |
|Types of volumes | Static, Dynamic| |
|Access mode | RWO(FC/iSCSI), RWO/RWX/ROX(NFS) | RWX/ROX(FC/iSCSI)|
-|Kubernetes | v1.14, v1.16 | V1.13 or previous versions|
-|Installer | Helm v3.x,v2.x | Operator |
-|OpenShift | v4.3 (Helm installation only) | v4.2 |
-|OS | RHEL 7.6, RHEL 7.7, CentOS 7.6, CentOS 7.7 | Ubuntu, other Linux variants|
+|Kubernetes | v1.17, v1.18, v1.19 | v1.16 or previous versions|
+|Docker EE | v3.1 | Other versions|
+|Installer | Helm v3.x, Operator | |
+|OpenShift | v4.3 (except snapshot), v4.4 | Other versions |
+|OS | RHEL 7.6, RHEL 7.7, RHEL 7.8, CentOS 7.6, CentOS 7.7, CentOS 7.8 | Ubuntu, other Linux variants|
|Unity | OE 5.0.0, 5.0.1, 5.0.2, 5.0.3 | Previous versions and Later versions|
|Protocol | FC, iSCSI, NFS | |
## Installation overview
-The Helm chart installs CSI Driver for Unity using a shell script (helm/install.unity). This script installs the CSI driver container image along with the required Kubernetes sidecar containers.
+Installation in a Kubernetes cluster should be done using the scripts within the `dell-csi-helm-installer` directory.
-** Note: Linux user should have root privileges to install this CSI Driver.**
+For more information, consult the [README.md](dell-csi-helm-installer/README.md).
-The controller section of the Helm chart installs the following components in a Stateful Set in the namespace unity:
+The controller section of the Helm chart installs the following components in a Stateful Set:
* CSI Driver for Unity
* Kubernetes Provisioner, which provisions the volumes
* Kubernetes Attacher, which attaches the volumes to the containers
* Kubernetes Snapshotter, which provides snapshot support
+* Kubernetes Resizer, which provides volume expansion support
-The node section of the Helm chart installs the following component in a Daemon Set in the namespace unity:
+The node section of the Helm chart installs the following component in a Daemon Set:
* CSI Driver for Unity
* Kubernetes Registrar, which handles the driver registration
@@ -54,54 +66,11 @@ Before you install CSI Driver for Unity, verify the requirements that are mentio
#### Requirements
* Install Kubernetes
-* Enable the Kubernetes feature gates
* Configure Docker service
-* Install Helm v2 with Tiller with a service account or Helm v3
-* Deploy Unity using Helm
+* Install Helm v3
+* To use the FC protocol, the host must be zoned with the Unity array
* To use iSCSI and NFS protocol, iSCSI initiator and NFS utility packages need to be installed
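+  (For example, on RHEL/CentOS worker nodes these packages can typically be installed with `yum install -y iscsi-initiator-utils nfs-utils`; package names are assumptions for other distributions.)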
-## Enable Kubernetes feature gates
-
-The Kubernetes feature gates must be enabled before installing CSI Driver for Unity.
-
-#### About Enabling Kubernetes feature gates
-
-The Feature Gates section of Kubernetes home page lists the Kubernetes feature gates. The following Kubernetes feature gates must be enabled:
-
-* VolumeSnapshotDataSource
-
-### Procedure
-
- 1. On each master and node of Kubernetes, edit /var/lib/kubelet/config.yaml and append the following lines at the end to set feature-gate settings for the kubelets:
- */var/lib/kubelet/config.yaml*
-
- ```
- VolumeSnapshotDataSource: true
- ```
-
-2. On the master node, set the feature gate settings of the kube-apiserver.yaml, kube-controllermanager.yaml and kube-scheduler.yaml file as follows:
-
- */etc/kubernetes/manifests/kube-apiserver.yaml
- /etc/kubernetes/manifests/kube-controller-manager.yaml
- /etc/kubernetes/manifests/kube-scheduler.yaml*
-
- ```
- - --feature-gates=VolumeSnapshotDataSource=true
- ```
-
-3. On each node (including master), edit the variable **KUBELET_KUBECONFIG_ARGS** of /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf file as follows:
-
- ```
- Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --feature-gates=VolumeSnapshotDataSource=true"
- ```
-
-4. Restart the kublet on all nodes.
-
- ```
- systemctl daemon-reload
- systemctl restart kubelet
- ```
-
## Configure Docker service
The mount propagation in Docker must be configured on all Kubernetes nodes before installing CSI Driver for Unity.
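+
+A minimal sketch of one common way to enable shared mount propagation, assuming Docker runs under systemd (the drop-in path below is hypothetical; adjust it for your distribution):
+
+```
+# /etc/systemd/system/docker.service.d/mount-propagation.conf
+[Service]
+MountFlags=shared
+```
+
+Then reload and restart Docker with `systemctl daemon-reload && systemctl restart docker`.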
@@ -129,7 +98,7 @@ Install CSI Driver for Unity using this procedure.
*Before you begin*
* You must have the downloaded files, including the Helm chart from the source [git repository](https://github.com/dell/csi-unity), ready for this procedure.
- * In the top-level helm directory, there should be two shell scripts, *install.unity* and *uninstall.unity*. These scripts handle some of the pre and post operations that cannot be performed in the helm chart, such as creating Custom Resource Definitions (CRDs), if needed.
+ * In the top-level dell-csi-helm-installer directory, there should be two scripts, *csi-install.sh* and *csi-uninstall.sh*. These scripts handle some of the pre and post operations that cannot be performed in the helm chart, such as creating Custom Resource Definitions (CRDs), if needed.
 * Make sure the "unity" namespace exists in the Kubernetes cluster. Use the `kubectl create namespace unity` command to create the namespace if it is not present.
@@ -137,7 +106,7 @@ Procedure
1. Collect information from the Unity Systems like Unique ArrayId, IP address, username and password. Make a note of the value for these parameters as they must be entered in the secret.json and myvalues.yaml file.
-2. Copy the csi-unity/values.yaml into a file in the same directory as the install.unity named myvalues.yaml, to customize settings for installation.
+2. Copy the csi-unity/values.yaml into a file named myvalues.yaml in the same directory as csi-install.sh to customize settings for installation.
3. Edit myvalues.yaml to set the following parameters for your installation:
@@ -145,7 +114,7 @@ Procedure
| Parameter | Description | Required | Default |
| --------- | ----------- | -------- |-------- |
- | certSecretCount | Represents number of certificate secrets, which user is going to create for ssl authentication. (unity-cert-0..unity-cert-n). Value should be between 1 and 10 | false | 1 |
+   | certSecretCount | Represents the number of certificate secrets that the user creates for SSL authentication (unity-cert-0..unity-cert-n). The minimum value is 1 | false | 1 |
| syncNodeInfoInterval | Time interval to add node info to array. Default 15 minutes. Minimum value should be 1 minute | false | 15 |
| volumeNamePrefix | String to prepend to any volumes created by the driver | false | csivol |
| snapNamePrefix | String to prepend to any snapshot created by the driver | false | csi-snap |
@@ -167,7 +136,7 @@ Procedure
| ***Snapshot Class parameters*** | Following parameters are not present in values.yaml |
| storageArrayList[i].snapshotClass.retentionDuration | To set the snapshot retention duration. Format: "1:23:52:50" (number of days:hours:minutes:sec) | false | "" |
- Note: User should provide all boolean values with double quotes. This applicable only for myvalues.yaml. Ex: "true"/"false"
+   **Note**: Users should provide all boolean values with double quotes. This applies only to myvalues.yaml. Ex: "true"/"false"
Example *myvalues.yaml*
@@ -203,7 +172,9 @@ Procedure
nasServer: "nasserver_2"
```
-4. Prepare the secret.json for driver configuration.
+4. Create an empty secret by navigating to the helm folder that contains the emptysecret.yaml file and running the `kubectl create -f emptysecret.yaml` command. A sketch of such a manifest follows.
+
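+   A hypothetical sketch of what such an empty-secret manifest typically looks like (the emptysecret.yaml file shipped in the helm folder is authoritative; the secret name here is an assumption):
+   ```yaml
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: unity-creds
+     namespace: unity
+   type: Opaque
+   data:
+     config: ""
+   ```
+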
+5. Prepare the secret.json for driver configuration.
The following table lists driver configuration parameters for multiple storage arrays.
| Parameter | Description | Required | Default |
@@ -243,51 +214,88 @@ Procedure
`kubectl create secret generic unity-creds -n unity --from-file=config=secret.json -o yaml --dry-run | kubectl replace -f -`
- Note: The user needs to validate the JSON syntax and array related key/values while replacing the unity-creds secret.
+   **Note**: The user needs to validate the JSON syntax and array-related key/values while replacing the unity-creds secret.
The driver will continue to use previous values in case of an error found in the JSON file.
-4. Run the `sh install.unity` command to proceed with the installation.
+   **Note**: The "isDefaultArray" parameter in values.yaml and secret.json should match each other.
+
+6. Setup for snapshots
+
+ The Kubernetes Volume Snapshot feature is now beta in Kubernetes v1.17.
+
+   * The following section summarizes the changes in the [beta](https://kubernetes.io/blog/2019/12/09/kubernetes-1-17-feature-cis-volume-snapshot-beta/) release.
+
+ In order to use the Kubernetes Volume Snapshot feature, you must ensure the following components have been deployed on your Kubernetes cluster.
+
+   * Install the Snapshot Beta CRDs using the following commands:
+ ```shell script
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/release-2.0/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/release-2.0/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/release-2.0/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml
+   ```
+
+   * Install the volume snapshot controller:
+ ```shell script
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml
+ ```
+ After executing these commands, a snapshot-controller pod should be up and running.
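+   A quick way to confirm (a sketch; the controller lands in the namespace defined by the manifests above, typically default):
+   ```shell script
+   kubectl get pods --all-namespaces | grep snapshot-controller
+   ```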
+
+7. Run the `./csi-install.sh --namespace unity --values ./myvalues.yaml` command to proceed with the installation.
A successful installation should emit messages that look similar to the following samples:
```
- sh install.unity
- Kubernetes version v1.16.8
- Kubernetes master nodes: 10.*.*.*
- Kubernetes minion nodes:
- Verifying the feature gates.
- Installing using helm version 3
- NAME: unity
- LAST DEPLOYED: Thu May 14 05:05:42 2020
- NAMESPACE: unity
- STATUS: deployed
- REVISION: 1
- TEST SUITE: None
- Thu May 14 05:05:53 EDT 2020
- running 2 / 2
- NAME READY STATUS RESTARTS AGE
- unity-controller-0 4/4 Running 0 11s
- unity-node-mkbxc 2/2 Running 0 11s
- CSIDrivers:
- NAME CREATED AT
- unity 2020-05-14T09:05:42Z
- CSINodes:
- NAME CREATED AT
- 2020-04-16T20:59:16Z
- StorageClasses:
- NAME PROVISIONER AGE
- unity (default) csi-unity.dellemc.com 11s
- unity-iscsi csi-unity.dellemc.com 11s
- unity-nfs csi-unity.dellemc.com 11s
- unity--fc csi-unity.dellemc.com 11s
- unity--iscsi csi-unity.dellemc.com 11s
- unity--nfs csi-unity.dellemc.com 11s
+ ------------------------------------------------------
+ > Installing CSI Driver: csi-unity on 1.18
+ ------------------------------------------------------
+ ------------------------------------------------------
+ > Checking to see if CSI Driver is already installed
+ ------------------------------------------------------
+ ------------------------------------------------------
+ > Verifying Kubernetes and driver configuration
+ ------------------------------------------------------
+ |- Kubernetes Version: 1.18
+ |
+ |- Driver: csi-unity
+ |
+ |- Verifying Kubernetes versions
+ |
+ |--> Verifying minimum Kubernetes version Success
+ |
+ |--> Verifying maximum Kubernetes version Success
+ |
+ |- Verifying that required namespaces have been created Success
+ |
+ |- Verifying that required secrets have been created Success
+ |
+ |- Verifying that required secrets have been created Success
+ |
+ |- Verifying snapshot support
+ |
+ |--> Verifying that beta snapshot CRDs are available Success
+ |
+ |--> Verifying that beta snapshot controller is available Success
+ |
+ |- Verifying helm version Success
+
+ ------------------------------------------------------
+ > Verification Complete
+ ------------------------------------------------------
+ |
+ |- Installing Driver Success
+ |
+ |--> Waiting for statefulset unity-controller to be ready Success
+ |
+ |--> Waiting for daemonset unity-node to be ready Success
+ ------------------------------------------------------
+ > Operation complete
+ ------------------------------------------------------
```
Results
- At the end of the script, the kubectl get pods -n unity is called to GET the status of the pods and you will see the following:
- * unity-controller-0 with 4/4 containers ready, and status displayed as Running.
+   At the end of the script, the statefulset unity-controller and the daemonset unity-node will be ready. Execute the command **kubectl get pods -n unity** to get the status of the pods and you will see the following:
+ * unity-controller-0 with 5/5 containers ready, and status displayed as Running.
* Agent pods with 2/2 containers and the status displayed as Running.
- Finally, the script lists the created storageclasses such as, "unity". Additional storage classes can be created for different combinations of file system types and Unity storage pools. The script also creates volumesnapshotclass "unity-snapclass".
+   Finally, the script creates storage classes such as "unity". Additional storage classes can be created for different combinations of file system types and Unity storage pools. The script also creates the volumesnapshotclass "unity-snapclass".
## Certificate validation for Unisphere REST API calls
@@ -320,92 +328,41 @@ If the Unisphere certificate is self-signed or if you are using an embedded Unis
`kubectl create secret generic unity-certs-0 -n unity --from-file=cert-0=ca_cert_0.pem -o yaml --dry-run | kubectl replace -f -`
3. Repeat step-1 & 2 to create multiple cert secrets with incremental index (ex: unity-certs-1, unity-certs-2, etc)
-Note: User can add multiple certificates in the same secret. The certificate file should not exceed more than 1Mb due to kubernetes secret size limitation.
+**Note**: "unity" is the namespace for helm based installation but namespace can be user defined in operator based installation.
+
+**Note**: Users can add multiple certificates in the same secret. The certificate file should not exceed 1 MB due to the Kubernetes secret size limitation.
-Note: Whenever certSecretCount parameter changes in myvalues.yaml user needs to uninstall and install the driver.
+**Note**: Whenever the certSecretCount parameter changes in myvalues.yaml, the user needs to uninstall and reinstall the driver.
## Upgrade CSI Driver for Unity
Preparing myvalues.yaml is the same as explained above.
-**Note** Supported upgrade path is from CSI Driver for Dell EMC Unity v1.1.0.1 to CSI Driver for Dell EMC Unity v1.2. If user is in v1.0 or v1.1, please upgrade to v1.1.0.1 before upgrading to v1.2 to avoid problems.
+To upgrade the driver from csi-unity v1.2.1 on Kubernetes 1.16 to csi-unity v1.3 on Kubernetes 1.17:
+1. Remove all volume snapshot, volume snapshot content and volume snapshot class objects.
+2. Upgrade the Kubernetes version to 1.17 before upgrading the CSI driver.
+3. Uninstall the existing driver.
+4. Uninstall the alpha snapshot CRDs (a sketch of the deletion commands follows this list).
+5. Verify that all prerequisites to install csi-unity v1.3 are fulfilled.
+6. Install the driver using the installation steps from [here](#install-csi-driver-for-unity).
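+
+A sketch of the CRD removal in step 4, assuming the alpha CRDs carry the standard snapshot.storage.k8s.io names (verify first with `kubectl get crd | grep snapshot`):
+```shell script
+kubectl delete crd volumesnapshotclasses.snapshot.storage.k8s.io
+kubectl delete crd volumesnapshotcontents.snapshot.storage.k8s.io
+kubectl delete crd volumesnapshots.snapshot.storage.k8s.io
+```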
-Delete the unity-creds secret and recreate again using secret.json as explained above.
+**Note**: Users have to re-create existing custom storage classes (if any) according to the latest (v1.3) format.
-
-Execute the following command to not to delete the unity-creds secret by helm
-
-```kubectl annotate secret unity-creds -n unity "helm.sh/resource-policy"=keep```
-
-Make sure unity-certs-* secrets are created properly before upgrading the driver.
-
-Run the `sh upgrade.unity` command to proceed with the upgrading process.
-
-**Note**: Upgrading CSI Unity driver is possible within the same version of Helm. (Ex: Helm V2 to Helm V2)
-
-**Note**: Sometimes user might get a warning saying "updates to parameters are forbidden" when we try to upgrade from previous versions. Delete the storage classes and upgrade the driver.
-
-A successful upgrade should emit messages that look similar to the following samples:
-
- ```
- $ ./upgrade.unity
- Kubernetes version v1.16.8
- Kubernetes master nodes: 10.*.*.*
- Kubernetes minion nodes:
- Verifying the feature gates.
- node-1's password:
- lifecycle present :2
- Removing lifecycle hooks from daemonset
- daemonset.extensions/unity-node patched
- daemonset.extensions/unity-node patched
- daemonset.extensions/unity-node patched
- warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
- pod "unity-node-t1j5h" force deleted
- Thu May 14 05:05:53 EDT 2020
- running 2 / 2
- NAME READY STATUS RESTARTS AGE
- unity-controller-0 4/4 Running 0 12s
- unity-node-n14gj 2/2 Running 0 12s
- Upgrading using helm version 3
- Release "unity" has been upgraded. Happy Helming!
- NAME: unity
- LAST DEPLOYED: Thu May 14 05:05:53 2020
- NAMESPACE: unity
- STATUS: deployed
- REVISION: 2
- TEST SUITE: None
- Thu May 14 05:06:02 EDT 2020
- running 2 / 2
- NAME READY STATUS RESTARTS AGE
- unity-controller-0 4/4 Running 0 11s
- unity-node-rn6px 2/2 Running 0 11s
- CSIDrivers:
- NAME CREATED AT
- unity 2020-04-23T09:25:01Z
- CSINodes:
- NAME CREATED AT
- 2020-04-16T20:59:16Z
- StorageClasses:
- NAME PROVISIONER AGE
- unity (default) csi-unity.dellemc.com 11s
- unity-iscsi csi-unity.dellemc.com 11s
- unity-nfs csi-unity.dellemc.com 11s
- unity--fc csi-unity.dellemc.com 11s
- unity--iscsi csi-unity.dellemc.com 11s
- unity--nfs csi-unity.dellemc.com 11s
- ```
-
- User has to re-create existing custom-storage classes (if any) according to latest (v1.2) format.
-
-## Migrate from Helm 2 to Helm 3
-1. Get the latest code from github.com/dell/csi-unity by executing the following command.
- `git clone -b v1.1.0.1 https://github.com/dell/csi-unity.git`
-2. Uninstall the CSI Driver for Dell EMC Unity v1.0 or v1.1 using the uninstall.unity script under csi-unity/helm using Helm 2.
-3. Go to https://helm.sh/docs/topics/v2_v3_migration/ and follow the instructions to migrate from Helm 2 to Helm 3.
-4. Once Helm 3 is ready, install the CSI Driver for Dell EMC Unity v1.1.0.1 using install.unity script under csi-unity/helm.
-5. List the pods with the following command (to verify the status)
-
- `kubectl get pods -n unity`
+## Building the driver image (UBI)
+**NOTE**: Only a RHEL host can be used to build the driver image.
+1. Make sure podman is installed on the node.
+2. Add the fully-qualified name of the image repository to the [registries.insecure]
+   section of the /etc/containers/registries.conf file. For example:
+   ```
+   [registries.insecure]
+   registries = ['myregistry.example.com']
+   ```
+3. Inside the csi-unity directory, execute this command to build an image that can be used locally:\
+   `make podman-build`
+4. Tag the generated image to the desired repository with the command:\
+   `podman tag IMAGE_NAME:IMAGE_TAG IMAGE_REPO/IMAGE_REPO_NAMESPACE/IMAGE_NAME:IMAGE_TAG`
+5. To push the image to the repository, execute the command (see the worked example below):\
+   `podman push IMAGE_REPO/IMAGE_REPO_NAMESPACE/IMAGE_NAME:IMAGE_TAG`
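+
+For example, using the hypothetical registry from step 2 and an image produced by `make podman-build` (the image name and tags are illustrative only):\
+`podman tag csi-unity-jdoe:20200702120000 myregistry.example.com/csi-unity/csi-unity:v1.3.0`\
+`podman push myregistry.example.com/csi-unity/csi-unity:v1.3.0`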
## Test deploying a simple pod with Unity storage
Test the deployment workflow of a simple pod on Unity storage.
@@ -481,21 +438,21 @@ Test the deployment workflow of a simple pod on Unity storage.
**Note**: Verify on the Unity system that the volume is attached to the host where the nginx container is running
4. **Create Snapshot**
-
- The following procedure will create a snapshot of the volume in the container using VolumeSnapshot objects defined in snap.yaml. The following are the contents of snap.yaml.
+ The following procedure will create a snapshot of the volume in the container using VolumeSnapshot objects defined in snap.yaml.
+ The following are the contents of snap.yaml.
*snap.yaml*
```
- apiVersion: snapshot.storage.k8s.io/v1alpha1
+ apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
- name: testvolclaim1-snap1
+ name: testvolclaim1-snap1
+ namespace: default
spec:
- snapshotClassName: unity-snapclass
- source:
- name: testvolclaim1
- kind: PersistentVolumeClaim
+ volumeSnapshotClassName: unity-snapclass
+ source:
+ persistentVolumeClaimName: testvolclaim1
```
Execute the following command to create snapshot
@@ -524,6 +481,7 @@ Test the deployment workflow of a simple pod on Unity storage.
Delete the Nginx application to unattach the volume from host
`kubectl delete -f nginx.yaml`
+
7. **To delete the volume**
```
@@ -531,7 +489,89 @@ Test the deployment workflow of a simple pod on Unity storage.
kubectl delete pvc testvolclaim1
kubectl get pvc
```
-## Static volume creation
+
+8. **Volume Expansion**
+
+ To expand a volume, execute the following command to edit the pvc:
+ ```
+ kubectl edit pvc pvc-name
+ ```
+   Then, edit the "storage" field in the spec section with the required new size:
+ ```
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+         storage: 10Gi # This field is updated from 5Gi to 10Gi, the required new size
+ ```
+   **Note**: Make sure the storage class used to create the pvc has the allowVolumeExpansion field set to true (see the sketch below). The new size cannot be less than the existing size of the pvc.
+
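+   A minimal sketch of such a storage class (the class name, pool and protocol below are illustrative assumptions, not values from this guide):
+   ```yaml
+   apiVersion: storage.k8s.io/v1
+   kind: StorageClass
+   metadata:
+     name: unity-expandable
+   provisioner: csi-unity.dellemc.com
+   allowVolumeExpansion: true
+   parameters:
+     storagePool: pool_1
+     protocol: FC
+   ```
+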
+9. **Create Volume Clone**
+
+ Create a file (`clonepvc.yaml`) with the following content.
+
+ ```
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: clone-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ dataSource:
+ kind: PersistentVolumeClaim
+ name: source-pvc
+ storageClassName: unity
+ ```
+
+   Execute the following command to create the volume clone:
+   ```
+   kubectl create -f $PWD/clonepvc.yaml
+   ```
+   **Note**: The size of the clone pvc must be equal to the size of the source pvc.
+
+   **Note**: For the NFS protocol, the user cannot expand a cloned pvc.
+
+   **Note**: For the NFS protocol, deletion of the source pvc is not permitted while a cloned pvc exists.
+
+10. **Create Volume From Snapshot**
+
+ Create a file (`pvcfromsnap.yaml`) with the following content.
+
+ ```
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: pvcfromsnap
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ dataSource:
+ kind: VolumeSnapshot
+ name: source-snapshot
+ apiGroup: snapshot.storage.k8s.io
+ storageClassName: unity
+ ```
+
+   Execute the following command to create a volume from the snapshot:
+   ```
+   kubectl create -f $PWD/pvcfromsnap.yaml
+   ```
+   **Note**: The size of the pvc created from a snapshot must be equal to the size of the source snapshot.
+
+   **Note**: For the NFS protocol, a pvc created from a snapshot cannot be expanded.
+
+   **Note**: For the NFS protocol, deletion of the source pvc is not permitted while a pvc created from its snapshot exists.
+
+## Static volume creation (Volume ingestion)
+
Static provisioning is a feature that is native to Kubernetes and that allows cluster administrators to make existing storage devices available to a cluster.
As a cluster administrator, you must know the details of the storage device, its supported configurations, and mount options.
@@ -545,6 +585,8 @@ apiVersion: v1
kind: PersistentVolume
metadata:
name: static-pv
+ annotations:
+ pv.kubernetes.io/provisioned-by: csi-unity.dellemc.com
spec:
accessModes:
- ReadWriteOnce
@@ -553,6 +595,7 @@ spec:
csi:
driver: csi-unity.dellemc.com
volumeHandle: csivol-vol-name-FC-apm001234567-sv_12
+ fsType: xfs
persistentVolumeReclaimPolicy: Delete
claimRef:
namespace: default
@@ -565,7 +608,7 @@ spec:
*\<volume-name\>-\<protocol\>-\<arrayid\>-\<volume id\>*
* volume-name: Name of the volume. Can have any number of "-"
-* Possible values for "Protocol" are "FC", "ISCSI" and "NFS"
+* Possible values for "Protocol" are "FC", "iSCSI" and "NFS"
* arrayid: arrayid defined in lower case
* volume id: Represents the LUN cli-id or Filesystem ID (not the resource-id, in the case of a filesystem)
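+
+For example, the handle `csivol-vol-name-FC-apm001234567-sv_12` from the sample above decodes to volume name "csivol-vol-name", protocol "FC", array id "apm001234567" and LUN cli-id "sv_12".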
@@ -581,6 +624,7 @@ spec:
resources:
requests:
storage: 5Gi
+ storageClassName: unity
```
4. Create Pod
@@ -606,22 +650,86 @@ spec:
claimName: myclaim
```
+## Snapshot ingestion
+Snapshot ingestion is a feature that allows cluster administrators to make an existing snapshot on the array, created by a user, available to a cluster.
+
+To make an existing snapshot available to a cluster, the user must manually create a snapshot, or use an existing one, in Unisphere for the PV.
+
+1. Create a snapshot or identify an existing snapshot using Unisphere
+
+2. Create a VolumeSnapshotContent as explained below
+```yaml
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshotContent
+metadata:
+ name: manual-snapshot-content
+spec:
+ deletionPolicy: Delete
+ driver: csi-unity.dellemc.com
+ volumeSnapshotClassName: unity-snapclass
+ source:
+ snapshotHandle: snap1-FC-apm00175000000-38654806278
+ volumeSnapshotRef:
+ name: manual-snapshot
+ namespace: default
+```
+
+**"snapshotHandle"** is the key parameter that contains four sections.
+
+ 1. Snapshot name (unused)
+ 2. Type of snapshot (unused and if specified it should be FC/iSCSI/NFS)
+ 3. Arrays id ex: apm00175000000
+ 4. Snapshot id ex:38654806278
+
+
+3. Create a VolumeSnapshot
+
+```yaml
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshot
+metadata:
+ name: manual-snapshot
+spec:
+ volumeSnapshotClassName: unity-snapclass
+ source:
+ volumeSnapshotContentName: manual-snapshot-content
+```
+
+4. Ingestion is completed in the above steps, and the user can perform Clone Volume or Create Volume from Snapshot using the VolumeSnapshot created from the VolumeSnapshotContent.
+
+Example: Create a volume from the VolumeSnapshot
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: restore-pvc-from-snap
+spec:
+ storageClassName: unity
+ dataSource:
+ name: manual-snapshot
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+```
## Dynamically update the unity-creds secrets
Users can dynamically add or delete array information in the secret. Whenever an update happens, the driver updates the "Host" information in the array.
Users can update the secret using the following command:
- `kubectl create secret generic unity-creds -n unity --from-file=config=secret.json -o yaml --dry-run | kubectl replace -f - `
+ `kubectl create secret generic unity-creds -n unity --from-file=config=secret.json -o yaml --dry-run=client | kubectl replace -f - `
-* Note: * Updating unity-certs-x secrets is a manual process, unlike unity-creds. Users have to re-install the driver in case of updating/adding the SSL certificates or changing the certSecretCount parameter.
+**Note**: Updating unity-certs-x secrets is a manual process, unlike unity-creds. Users have to re-install the driver when updating/adding the SSL certificates or changing the certSecretCount parameter.
-## Install CSI-Unity driver using dell-csi-operator in OpenShift
+## Install CSI-Unity driver using dell-csi-operator in OpenShift / upstream Kubernetes
CSI Driver for Dell EMC Unity can also be installed via the new Dell EMC Storage Operator.
-Note: Currently, csi-unity v1.1.0.1 is supported using csi-operator. Use helm-v3 to install csi-driver v1.2 for OpenShift
-
-The Dell EMC Storage CSI Operator is a Kubernetes Operator, which can be used to install and manage the CSI Drivers provided by Dell EMC for various storage platforms. This operator is available as a community operator for upstream Kubernetes and can be deployed using OperatorHub.io. It is also available as a community operator for OpenShift clusters and can be deployed using OpenShift Container Platform. Both these methods of installation use OLM (Operator Lifecycle Manager).
+The Dell EMC Storage CSI Operator is a Kubernetes Operator which can be used to install and manage the CSI Drivers provided by Dell EMC for various storage platforms. It is available as a community operator for upstream Kubernetes and can be deployed from https://operatorhub.io/operator/dell-csi-operator. It is also available as a community operator for OpenShift clusters and can be deployed using the OpenShift Container Platform. Both upstream Kubernetes and OpenShift support installation via OLM (Operator Lifecycle Manager) as well as manual installation.
The operator can also be deployed directly by following the instructions available here - https://github.com/dell/dell-csi-operator
@@ -629,86 +737,81 @@ There are sample manifests provided, which can be edited to do an easy installat
Kubernetes Operators make it easy to deploy and manage the entire lifecycle of complex Kubernetes applications. Operators use Custom Resource Definitions (CRD), which represents the application and use custom controllers to manage them.
-### Listing CSI-Unity drivers
-User can query for csi-unity driver using the following command
-`kubectl get csiunity --all-namespaces`
-
### Procedure to create new CSI-Unity driver
1. Create namespace
- Run `kubectl create namespace unity` to create the unity namespace.
+   Run `kubectl create namespace test-unity` to create a namespace called test-unity. The namespace can be any user-defined name.
2. Create *unity-creds*
- Create a file called unity-creds.yaml with the following content
- ```yaml
- apiVersion: v1
- kind: Secret
- metadata:
- name: unity-creds
- namespace: unity
- type: Opaque
- data:
- # set username to the base64 encoded username
- username:
- # set password to the base64 encoded password
- password:
- ```
+   Create the secret mentioned in the [Install CSI Driver](#install-csi-driver-for-unity) section. The secret should be created in the user-defined namespace (test-unity, in this case).
+
+3. Create certificate secrets
+
+   As part of the CSI driver installation, the CSI driver requires secrets with the names unity-certs-0 to unity-certs-n in the user-defined namespace (test-unity, in this case).
+   The certificate creation procedure is explained in this [section](#certificate-validation-for-unisphere-rest-api-calls).
- Replace the values for the username and password parameters. These values can be optioned using base64 encoding as described in the following example:
- ```
- echo -n "myusername" | base64
- echo -n "mypassword" | base64
- ```
+   **Note**: The *'certSecretCount'* parameter is not required for the operator. Based on the secret name pattern (unity-certs-*), the operator reads all the secrets.
+   Secret name suffixes should be numbered consecutively from 0 to N; secrets after a missing number in the sequence will not be considered.
- Run `kubectl create -f unity-creds.yaml` command to create the secret
-
-3. Create a CR (Custom Resource) for unity using the sample provided below
-Create a new file `csiunity.yaml` with the following content.
+   Ex: If unity-certs-0, unity-certs-1 and unity-certs-3 are present in the namespace, then only the first two secrets are considered for SSL verification.
+
+4. Create a CR (Custom Resource) for unity using the sample provided below
+
+Create a new file `csiunity.yaml` by referring to the following content. Replace the given sample values according to your environment. You can find many sample CRDs under the deploy/crds folder when you install dell-csi-operator.
```yaml
apiVersion: storage.dell.com/v1
kind: CSIUnity
metadata:
- name: unity
- namespace: unity
+ name: test-unity
+ namespace: test-unity
spec:
driver:
- configVersion: v1
+ configVersion: v2
+ certSecretCount: 1
replicas: 1
+ sideCars:
+ -
+ name: snapshotter
+ snapshotClass:
+ -
+ name: test-snap
+ parameters:
+ retentionDuration: ""
common:
- image: "dellemc/csi-unity:v1.1.0.000R"
+ image: "dellemc/csi-unity:v1.3.0.000R"
imagePullPolicy: IfNotPresent
envs:
- name: X_CSI_UNITY_DEBUG
value: "true"
- - name: X_CSI_UNITY_ENDPOINT
- value: "https://"
- - name: X_CSI_UNITY_INSECURE
- value: "true"
storageClass:
- - name: fc
+ - name: virt2016****-fc
default: true
reclaimPolicy: "Delete"
parameters:
storagePool: pool_1
+ arrayId: "VIRT2016****"
protocol: "FC"
- - name: iscsi
+ - name: virt2017****-iscsi
reclaimPolicy: "Delete"
parameters:
storagePool: pool_1
+ arrayId: "VIRT2017****"
protocol: "iSCSI"
snapshotClass:
- - name: snapshot
- parameters:
- retentionDuration: "1:1:1:1"
+ - name: test-snap
+ parameters:
+ retentionDuration: ""
```
-4. Execute the following command to create unity custom resource
+5. Execute the following command to create unity custom resource
```kubectl create -f csiunity.yaml```
- The above command will deploy the csi-unity driver
+ The above command will deploy the csi-unity driver in the test-unity namespace.
+
+6. Any deployment errors can be found by inspecting the logs of the operator pod, which runs in the default namespace (e.g., `kubectl logs dell-csi-operator-64c58559f6-cbgv7`)
-5. User can configure the following parameters in CR
+7. Users can configure the following parameters in the CR
The following table lists the primary configurable parameters of the Unity driver chart and their default values.
@@ -717,8 +820,6 @@ Create a new file `csiunity.yaml` with the following content.
| ***Common parameters for node and controller*** |
| CSI_ENDPOINT | Specifies the HTTP endpoint for Unity. | No | /var/run/csi/csi.sock |
| X_CSI_DEBUG | To enable debug mode | No | false |
- | X_CSI_UNITY_ENDPOINT | Must provide a UNITY HTTPS unisphere url. | Yes | |
- | X_CSI_UNITY_INSECURE | Specifies that the Unity's hostname and certificate chain | No | true |
| GOUNITY_DEBUG | To enable debug mode for gounity library| No | false |
| ***Controller parameters*** |
| X_CSI_MODE | Driver starting mode | No | controller|
@@ -727,63 +828,23 @@ Create a new file `csiunity.yaml` with the following content.
| X_CSI_MODE | Driver starting mode | No | node|
| X_CSI_ISCSI_CHROOT | Path to which the driver will chroot before running any iscsi commands. | No | /noderoot |
-## Install csi-unity driver in OpenShift using HELM v3.x
-
-1. Clone the git repository. ( `git clone https://github.com/dell/csi-unity.git`)
-
-2. Change the directory to ./helm
-
-3. Create a namespace "unity" in kubernetes cluster
-
-4. Create unity-cert-0 to unity-cert-n secrets as explained in the previous sections.
-
-5. Create unity-creds secret using the secret.json explained in the previous sections.
-
-6. Create clusterrole (unity-node) with the following yaml
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: unity-node
-rules:
- - apiGroups:
- - security.openshift.io
- resourceNames:
- - privileged
- resources:
- - securitycontextconstraints
- verbs:
- - use
-```
-
-7. Create clusterrolebinding (unity-node) with the following yaml
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: unity-node
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: unity-node
-subjects:
- - kind: ServiceAccount
- name: unity-node
- namespace: unity
-```
+### Listing CSI-Unity drivers
+   User can query for the csi-unity driver using the following commands:
+ `kubectl get csiunity --all-namespaces`
+   `kubectl get pods -n <namespace>`
-8. Execute the following command to install the driver.
+   In addition, the user can enter the following command to make sure the operator is running:
-`helm install unity --values myvalues.yaml --values csi-unity/k8s-1.16-values.yaml -n unity ./csi-unity`
+ `kubectl get pods`
-Note: Preparing myvalues.yaml and secret.json is same as explained in the previous sections
+   The above command should display a pod whose name starts with dell-csi-operator running in the default namespace.
-## Support
-The CSI Driver for Dell EMC Unity image available on Dockerhub is officially supported by Dell EMC.
-
-The source code available on Github is unsupported and provided solely under the terms of the license attached to the source code. For clarity, Dell EMC does not provide support for any source code modifications.
-
-For any CSI driver setup, configuration issues, questions or feedback, join the Dell EMC Container community at https://www.dell.com/community/Containers/bd-p/Containers
-
-For any Dell EMC storage issues, please contact Dell support at: https://www.dell.com/support.
+   To upgrade the driver from csi-unity v1.2.1 in OpenShift 4.3 (installed using Helm) to csi-unity v1.3 in OpenShift 4.3:
+   1. Uninstall the existing csi-unity v1.2.1 driver using Helm's uninstall.unity script.
+   2. Install the operator using the instructions provided in https://github.com/dell/dell-csi-operator.
+   3. Create the CR by taking the reference from /deploy/crds/unity_v130_ops_43.yaml.
+   4. Users can install csi-unity v1.3 in the previous namespace (unity) or in a new namespace.
+   5. Install the csi-unity v1.3 driver using operator v1.1 by creating the object (e.g., `kubectl create -f
+      unity_v130_ops_43.yaml`).
+   6. Please note that the volumesnapshotclass will not be created as part of this installation, and no
+      volume-snapshot-related operations can be performed with this combination (csi-unity v1.3 and OpenShift 4.3).
\ No newline at end of file
diff --git a/build.sh b/build.sh
new file mode 100644
index 00000000..934f50b2
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# This script will build an image for the Unity CSI Driver
+# Before running this script, make sure that you have podman installed on your system
+# If you are going to push the image to an image repo, make sure that you are logged in
+# sh build.sh: build the image
+# sh build.sh -p: build and push the image
+
+function git_version {
+ local gitdesc=$(git describe --long)
+ local version="${gitdesc%%-*}"
+ MAJOR_VERSION=$(echo $version | cut -d. -f1)
+ MINOR_VERSION=$(echo $version | cut -d. -f2)
+ PATCH_NUMBER=$(echo $version | cut -d. -f3)
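+    # Extract the build number (commits since the last tag) from the "git describe --long" output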
+ BUILD_NUMBER_FROM_GIT=$(sed -e 's#.*-\(\)#\1#' <<< "${gitdesc%-*}")
+ echo MAJOR_VERSION=$MAJOR_VERSION MINOR_VERSION=$MINOR_VERSION PATCH_NUMBER=$PATCH_NUMBER BUILD_NUMBER_FROM_GIT=$BUILD_NUMBER_FROM_GIT
+ echo Target Version=$VERSION
+}
+
+function build_image {
+ echo $BUILDCMD build -t ${IMAGE_NAME}:${IMAGE_TAG} .
+ (cd .. && $BUILDCMD build -t ${IMAGE_NAME}:${IMAGE_TAG} --build-arg GOPROXY=$GOPROXY -f csi-unity/Dockerfile . --format=docker)
+ echo $BUILDCMD tag ${IMAGE_NAME}:${IMAGE_TAG} ${IMAGE_REPO}/${IMAGE_REPO_NAMESPACE}/${IMAGE_NAME}:${IMAGE_TAG}
+ $BUILDCMD tag ${IMAGE_NAME}:${IMAGE_TAG} ${IMAGE_REPO}/${IMAGE_REPO_NAMESPACE}/${IMAGE_NAME}:${IMAGE_TAG}
+}
+
+function push_image {
+ echo $BUILDCMD push ${IMAGE_REPO}/${IMAGE_REPO_NAMESPACE}/${IMAGE_NAME}:${IMAGE_TAG}
+ $BUILDCMD push ${IMAGE_REPO}/${IMAGE_REPO_NAMESPACE}/${IMAGE_NAME}:${IMAGE_TAG}
+}
+
+NAME=csi-unity
+IMAGE_NAME=${NAME}-${USER}
+VERSION=$(date +%Y%m%d%H%M%S)
+BIN_DIR=bin
+BIN_NAME=${NAME}
+IMAGE_REPO=amaas-eos-mw1.cec.lab.emc.com:5028
+IMAGE_REPO_NAMESPACE=csi-unity
+IMAGE_TAG=${VERSION}
+
+# Read options
+while getopts 'ph' flag; do
+ case "${flag}" in
+ p) PUSH_IMAGE='true' ;;
+ h) git_version
+ exit 0 ;;
+ *) git_version
+ exit 0 ;;
+ esac
+done
+
+BUILDCMD="podman"
+DOCKEROPT="--format=docker"
+set -e
+
+if command -v podman >/dev/null 2>&1; then
+    echo "Using podman for building image"
+else
+    echo "podman must be installed for building UBI based image"
+    exit 1
+fi
+
+# Build the image
+build_image
+
+if [ "$PUSH_IMAGE" = true ]; then
+ push_image
+fi
+
+exit 0
\ No newline at end of file
diff --git a/dell-csi-helm-installer/README.md b/dell-csi-helm-installer/README.md
new file mode 100644
index 00000000..698e3919
--- /dev/null
+++ b/dell-csi-helm-installer/README.md
@@ -0,0 +1,157 @@
+# Helm Installer for Dell EMC CSI Storage Providers
+
+## Description
+
+This directory provides scripts to install, upgrade and uninstall the CSI drivers, and to verify the Kubernetes environment.
+These same scripts are present in all Dell EMC Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec)) drivers. This includes the drivers for:
+* [PowerFlex](https://github.com/dell/csi-vxflexos)
+* [PowerMax](https://github.com/dell/csi-powermax)
+* [PowerScale](https://github.com/dell/csi-powerscale)
+* [PowerStore](https://github.com/dell/csi-powerstore)
+* [Unity](https://github.com/dell/csi-unity)
+
+NOTE: This documentation uses the Unity driver as an example. If working with a different driver, substitute the name as appropriate.
+
+## Dependencies
+
+Installing any of the Dell EMC CSI Drivers requires a few utilities to be installed on the system running the installation.
+
+| Dependency | Usage |
+| ------------- | ----- |
+| `kubectl` | Kubectl is used to validate that the Kubernetes system meets the requirements of the driver. |
+| `helm` | Helm v3 is used as the deployment tool for Charts. See, [Install HELM 3](https://helm.sh/docs/intro/install/) for instructions to install HELM 3. |
+
+
+In order to use these tools, a valid `KUBECONFIG` is required. Ensure that either a valid configuration is in the default location or that the `KUBECONFIG` environment variable points to a valid configuration before using these tools.
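+
+For example (a sketch; the path below is the kubectl default and may differ on your system):
+```
+export KUBECONFIG=$HOME/.kube/config
+```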
+
+## Capabilities
+
+This project provides the following capabilities, each of which is discussed in detail later in this document.
+
+* Install a driver. When installing a driver, options are provided to specify the target namespace as well as options to control the types of verifications to be performed on the target system.
+* Upgrade a driver. Upgrading a driver is an effective way to either deploy a new version of the driver or to modify the parameters used in an initial deployment.
+* Uninstall a driver. This removes the driver and any installed storage classes.
+* Verify a Kubernetes system for suitability with a driver. These verification steps differ slightly from driver to driver, but include verifying version compatibility, namespace availability, and the existence of required secrets, and validating worker node compatibility with driver protocols such as iSCSI, Fibre Channel, and NFS.
+
+
+Most of these usages require the creation/specification of a values file. These files specify configuration settings that are passed into the driver and configure it for use. To create one of these files, the following steps should be followed:
+1. Copy a template file for the driver to a new location; the naming of this new file is at the user's discretion. The template files are always found within the driver repo at `helm/csi-<driver>/values.yaml`
+2. Edit the file such that it contains the proper configuration settings for the specific environment. These files are yaml formatted so maintaining the file structure is important.
+
+For example, to create a values file for the Unity driver the following steps can be executed
+```
+# cd to the installation script directory
+cd dell-csi-helm-installer
+
+# copy the template file
+cp ../helm/csi-unity/values.yaml ./my-unity-settings.yaml
+
+# edit the newly created values file
+vi my-unity-settings.yaml
+```
+
+These values files can then be archived for later reference or for usage when upgrading the driver.
+
+
+### Install A Driver
+
+Installing a driver is performed via the `csi-install.sh` script. This script requires a few arguments: the target namespace and the user-created values file. By default, this will verify the Kubernetes environment and present a list of warnings and/or errors. Errors must be addressed before installing; warnings should be examined for their applicability. For example, in order to install the Unity driver into a namespace called "unity", the following command should be run:
+```
+./csi-install.sh --namespace unity --values ./my-unity-settings.yaml
+```
+
+For usage information:
+```
+[dell-csi-helm-installer]# ./csi-install.sh -h
+Help for ./csi-install.sh
+
+Usage: ./csi-install.sh options...
+Options:
+ Required
+ --namespace[=] Kubernetes namespace containing the CSI driver
+ --values[=] Values file, which defines configuration values
+ Optional
+ --release[=] Name to register with helm, default value will match the driver name
+ --upgrade Perform an upgrade of the specified driver, default is false
+ --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root
+ --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification
+ --skip-verify-node Skip worker node verification checks
+ --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes.
+ -h Help
+```
+
+### Upgrade A Driver
+
+Upgrading a driver is very similar to installation. The `csi-install.sh` script is run, with the same required arguments, along with a `--upgrade` argument. For example, to upgrade the previously installed Unity driver, the following command can be supplied:
+
+```
+./csi-install.sh --namespace unity --values ./my-unity-settings.yaml --upgrade
+```
+
+For usage information:
+```
+[dell-csi-helm-installer]# ./csi-install.sh -h
+Help for ./csi-install.sh
+
+Usage: ./csi-install.sh options...
+Options:
+ Required
+ --namespace[=] Kubernetes namespace containing the CSI driver
+ --values[=] Values file, which defines configuration values
+ Optional
+ --release[=] Name to register with helm, default value will match the driver name
+ --upgrade Perform an upgrade of the specified driver, default is false
+ --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root
+ --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification
+ --skip-verify-node Skip worker node verification checks
+ --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes.
+ -h Help
+```
+
+### Uninstall A Driver
+
+To uninstall a driver, the `csi-uninstall.sh` script provides a handy wrapper around the `helm` utility. The only required argument for uninstallation is the namespace name. To uninstall the Unity driver:
+
+```
+./csi-uninstall.sh --namespace unity
+```
+
+For usage information:
+```
+[dell-csi-helm-installer]# ./csi-uninstall.sh -h
+Help for ./csi-uninstall.sh
+
+Usage: ./csi-uninstall.sh options...
+Options:
+ Required
+ --namespace[=] Kubernetes namespace to uninstall the CSI driver from
+ Optional
+ --release[=] Name to register with helm, default value will match the driver name
+ -h Help
+```
+
+### Verify A Kubernetes Environment
+
+The `verify.sh` script is run automatically as part of the installation and upgrade procedures, and can also be run by itself. This provides a handy means to validate a Kubernetes system without actually performing the installation. To verify an environment, run `verify.sh` with the namespace name and values file options.
+
+```
+./verify.sh --namespace unity --values ./my-unity-settings.yaml
+```
+
+For usage information:
+```
+[dell-csi-helm-installer]# ./verify.sh -h
+Help for ./verify.sh
+
+Usage: ./verify.sh options...
+Options:
+ Required
+ --namespace[=] Kubernetes namespace to install the CSI driver
+ --values[=] Values file, which defines configuration values
+ Optional
+ --skip-verify-node Skip worker node verification checks
+ --release[=] Name to register with helm, default value will match the driver name
+ --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root
+  -h                                       Help
+```
+
diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
new file mode 100644
index 00000000..4aa980cc
--- /dev/null
+++ b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
@@ -0,0 +1,85 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.2.5
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260"
+ creationTimestamp: null
+ name: volumesnapshotclasses.snapshot.storage.k8s.io
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .driver
+ name: Driver
+ type: string
+ - JSONPath: .deletionPolicy
+ description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass
+ should be deleted when its bound VolumeSnapshot is deleted.
+ name: DeletionPolicy
+ type: string
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotClass
+ listKind: VolumeSnapshotClassList
+ plural: volumesnapshotclasses
+ singular: volumesnapshotclass
+ preserveUnknownFields: false
+ scope: Cluster
+ subresources: {}
+ validation:
+ openAPIV3Schema:
+ description: VolumeSnapshotClass specifies parameters that a underlying storage
+ system uses when creating a volume snapshot. A specific VolumeSnapshotClass
+ is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
+ are non-namespaced
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ deletionPolicy:
+ description: deletionPolicy determines whether a VolumeSnapshotContent created
+ through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot
+ is deleted. Supported values are "Retain" and "Delete". "Retain" means
+ that the VolumeSnapshotContent and its physical snapshot on underlying
+ storage system are kept. "Delete" means that the VolumeSnapshotContent
+ and its physical snapshot on underlying storage system are deleted. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the storage driver that handles this
+ VolumeSnapshotClass. Required.
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: parameters is a key-value map with storage driver specific
+ parameters for creating snapshots. These values are opaque to Kubernetes.
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
new file mode 100644
index 00000000..34c51ad6
--- /dev/null
+++ b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
@@ -0,0 +1,233 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.2.5
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260"
+ creationTimestamp: null
+ name: volumesnapshotcontents.snapshot.storage.k8s.io
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.readyToUse
+ description: Indicates if a snapshot is ready to be used to restore a volume.
+ name: ReadyToUse
+ type: boolean
+ - JSONPath: .status.restoreSize
+ description: Represents the complete size of the snapshot in bytes
+ name: RestoreSize
+ type: integer
+ - JSONPath: .spec.deletionPolicy
+ description: Determines whether this VolumeSnapshotContent and its physical snapshot
+ on the underlying storage system should be deleted when its bound VolumeSnapshot
+ is deleted.
+ name: DeletionPolicy
+ type: string
+ - JSONPath: .spec.driver
+ description: Name of the CSI driver used to create the physical snapshot on the
+ underlying storage system.
+ name: Driver
+ type: string
+ - JSONPath: .spec.volumeSnapshotClassName
+ description: Name of the VolumeSnapshotClass to which this snapshot belongs.
+ name: VolumeSnapshotClass
+ type: string
+ - JSONPath: .spec.volumeSnapshotRef.name
+ description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent
+ object is bound.
+ name: VolumeSnapshot
+ type: string
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotContent
+ listKind: VolumeSnapshotContentList
+ plural: volumesnapshotcontents
+ singular: volumesnapshotcontent
+ preserveUnknownFields: false
+ scope: Cluster
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: VolumeSnapshotContent represents the actual "on-disk" snapshot
+ object in the underlying storage system
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: spec defines properties of a VolumeSnapshotContent created
+ by the underlying storage system. Required.
+ properties:
+ deletionPolicy:
+ description: deletionPolicy determines whether this VolumeSnapshotContent
+ and its physical snapshot on the underlying storage system should
+ be deleted when its bound VolumeSnapshot is deleted. Supported values
+ are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
+ and its physical snapshot on underlying storage system are kept. "Delete"
+ means that the VolumeSnapshotContent and its physical snapshot on
+ underlying storage system are deleted. In dynamic snapshot creation
+ case, this field will be filled in with the "DeletionPolicy" field
+ defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For
+ pre-existing snapshots, users MUST specify this field when creating
+ the VolumeSnapshotContent object. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the CSI driver used to create the
+ physical snapshot on the underlying storage system. This MUST be the
+ same as the name returned by the CSI GetPluginName() call for that
+ driver. Required.
+ type: string
+ source:
+ description: source specifies from where a snapshot will be created.
+ This field is immutable after creation. Required.
+ properties:
+ snapshotHandle:
+ description: snapshotHandle specifies the CSI "snapshot_id" of a
+ pre-existing snapshot on the underlying storage system. This field
+ is immutable.
+ type: string
+ volumeHandle:
+              description: volumeHandle specifies the CSI "volume_id" of the volume
+                from which a snapshot should be dynamically taken. This field
+                is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+ description: name of the VolumeSnapshotClass to which this snapshot
+ belongs.
+ type: string
+ volumeSnapshotRef:
+ description: volumeSnapshotRef specifies the VolumeSnapshot object to
+ which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
+              field must reference this VolumeSnapshotContent's name for the
+ bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
+ object, name and namespace of the VolumeSnapshot object MUST be provided
+ for binding to happen. This field is immutable after creation. Required.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of an
+ entire object, this string should contain a valid JSON/Go field
+ access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within
+ a pod, this would take on a value like: "spec.containers{name}"
+ (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]"
+ (container with index 2 in this pod). This syntax is chosen only
+ to have some well-defined way of referencing a part of an object.
+ TODO: this design is not final and this field is subject to change
+ in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference is
+ made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ - source
+ - volumeSnapshotRef
+ type: object
+ status:
+ description: status represents the current information of a snapshot.
+ properties:
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time snapshot
+ is taken by the underlying storage system. In dynamic snapshot creation
+ case, this field will be filled in with the "creation_time" value
+ returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing
+ snapshot, this field will be filled with the "creation_time" value
+ returned from the CSI "ListSnapshots" gRPC call if the driver supports
+ it. If not specified, it indicates the creation time is unknown. The
+ format of this field is a Unix nanoseconds time encoded as an int64.
+ On Unix, the command `date +%s%N` returns the current time in nanoseconds
+ since 1970-01-01 00:00:00 UTC.
+ format: int64
+ type: integer
+ error:
+ description: error is the latest observed error during snapshot creation,
+ if any.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error
+ during snapshot creation if specified. NOTE: message may be logged,
+ and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used
+ to restore a volume. In dynamic snapshot creation case, this field
+ will be filled in with the "ready_to_use" value returned from CSI
+ "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this
+ field will be filled with the "ready_to_use" value returned from the
+ CSI "ListSnapshots" gRPC call if the driver supports it, otherwise,
+ this field will be set to "True". If not specified, it means the readiness
+ of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ description: restoreSize represents the complete size of the snapshot
+ in bytes. In dynamic snapshot creation case, this field will be filled
+ in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
+ gRPC call. For a pre-existing snapshot, this field will be filled
+ with the "size_bytes" value returned from the CSI "ListSnapshots"
+ gRPC call if the driver supports it. When restoring a volume from
+ this snapshot, the size of the volume MUST NOT be smaller than the
+ restoreSize if it is specified, otherwise the restoration will fail.
+ If not specified, it indicates that the size is unknown.
+ format: int64
+ minimum: 0
+ type: integer
+ snapshotHandle:
+ description: snapshotHandle is the CSI "snapshot_id" of a snapshot on
+ the underlying storage system. If not specified, it indicates that
+ dynamic snapshot creation has either failed or it is still in progress.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
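
Besides backing dynamically created snapshots, this CRD supports statically
registering a snapshot that already exists on the array. A hedged sketch with
placeholder handle and names (per the schema above, deletionPolicy, driver,
source, and volumeSnapshotRef are all required):

    kubectl apply -f - <<'EOF'
    apiVersion: snapshot.storage.k8s.io/v1beta1
    kind: VolumeSnapshotContent
    metadata:
      name: pre-provisioned-content
    spec:
      deletionPolicy: Retain
      driver: csi-unity.dellemc.com
      source:
        snapshotHandle: example-snapshot-id   # placeholder CSI "snapshot_id"
      volumeSnapshotRef:
        name: pre-provisioned-snap
        namespace: default
    EOF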
diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml
new file mode 100644
index 00000000..483706f1
--- /dev/null
+++ b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml
@@ -0,0 +1,188 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.2.5
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260"
+ creationTimestamp: null
+ name: volumesnapshots.snapshot.storage.k8s.io
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.readyToUse
+ description: Indicates if a snapshot is ready to be used to restore a volume.
+ name: ReadyToUse
+ type: boolean
+ - JSONPath: .spec.source.persistentVolumeClaimName
+ description: Name of the source PVC from where a dynamically taken snapshot will
+ be created.
+ name: SourcePVC
+ type: string
+ - JSONPath: .spec.source.volumeSnapshotContentName
+ description: Name of the VolumeSnapshotContent which represents a pre-provisioned
+ snapshot.
+ name: SourceSnapshotContent
+ type: string
+ - JSONPath: .status.restoreSize
+ description: Represents the complete size of the snapshot.
+ name: RestoreSize
+ type: string
+ - JSONPath: .spec.volumeSnapshotClassName
+ description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
+ name: SnapshotClass
+ type: string
+ - JSONPath: .status.boundVolumeSnapshotContentName
+ description: The name of the VolumeSnapshotContent to which this VolumeSnapshot
+ is bound.
+ name: SnapshotContent
+ type: string
+ - JSONPath: .status.creationTime
+ description: Timestamp when the point-in-time snapshot is taken by the underlying
+ storage system.
+ name: CreationTime
+ type: date
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshot
+ listKind: VolumeSnapshotList
+ plural: volumesnapshots
+ singular: volumesnapshot
+ preserveUnknownFields: false
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: VolumeSnapshot is a user's request for either creating a point-in-time
+ snapshot of a persistent volume, or binding to a pre-existing snapshot.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: 'spec defines the desired characteristics of a snapshot requested
+ by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
+ Required.'
+ properties:
+ source:
+ description: source specifies where a snapshot will be created from.
+ This field is immutable after creation. Required.
+ properties:
+ persistentVolumeClaimName:
+ description: persistentVolumeClaimName specifies the name of the
+ PersistentVolumeClaim object in the same namespace as the VolumeSnapshot
+ object where the snapshot should be dynamically taken from. This
+ field is immutable.
+ type: string
+ volumeSnapshotContentName:
+ description: volumeSnapshotContentName specifies the name of a pre-existing
+ VolumeSnapshotContent object. This field is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+ description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass
+ requested by the VolumeSnapshot. If not specified, the default snapshot
+ class will be used if one exists. If not specified, and there is no
+ default snapshot class, dynamic snapshot creation will fail. Empty
+ string is not allowed for this field. TODO(xiangqian): a webhook validation
+ on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes'
+ type: string
+ required:
+ - source
+ type: object
+ status:
+ description: 'status represents the current information of a snapshot. NOTE:
+ status can be modified by sources other than system controllers, and must
+ not be depended upon for accuracy. Controllers should only use information
+ from the VolumeSnapshotContent object after verifying that the binding
+ is accurate and complete.'
+ properties:
+ boundVolumeSnapshotContentName:
+ description: 'boundVolumeSnapshotContentName represents the name of
+ the VolumeSnapshotContent object to which the VolumeSnapshot object
+ is bound. If not specified, it indicates that the VolumeSnapshot object
+ has not been successfully bound to a VolumeSnapshotContent object
+ yet. NOTE: Specified boundVolumeSnapshotContentName alone does not
+ mean binding is valid. Controllers MUST always verify bidirectional
+ binding between VolumeSnapshot and VolumeSnapshotContent to
+ avoid possible security issues.'
+ type: string
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time snapshot
+ is taken by the underlying storage system. In dynamic snapshot creation
+ case, this field will be filled in with the "creation_time" value
+ returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing
+ snapshot, this field will be filled with the "creation_time" value
+ returned from the CSI "ListSnapshots" gRPC call if the driver supports
+ it. If not specified, it indicates that the creation time of the snapshot
+ is unknown.
+ format: date-time
+ type: string
+ error:
+ description: error is the last observed error during snapshot creation,
+            if any. This field could be helpful to upper level controllers (e.g.,
+            an application controller) to decide whether they should continue
+            waiting for the snapshot to be created based on the type of error
+            reported.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error
+ during snapshot creation if specified. NOTE: message may be logged,
+ and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used
+ to restore a volume. In dynamic snapshot creation case, this field
+ will be filled in with the "ready_to_use" value returned from CSI
+ "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this
+ field will be filled with the "ready_to_use" value returned from the
+ CSI "ListSnapshots" gRPC call if the driver supports it, otherwise,
+ this field will be set to "True". If not specified, it means the readiness
+ of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ anyOf:
+ - type: integer
+ - type: string
+ description: restoreSize represents the complete size of the snapshot
+ in bytes. In dynamic snapshot creation case, this field will be filled
+ in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
+ gRPC call. For a pre-existing snapshot, this field will be filled
+ with the "size_bytes" value returned from the CSI "ListSnapshots"
+ gRPC call if the driver supports it. When restoring a volume from
+ this snapshot, the size of the volume MUST NOT be smaller than the
+ restoreSize if it is specified, otherwise the restoration will fail.
+ If not specified, it indicates that the size is unknown.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ required:
+ - spec
+ type: object
+ version: v1beta1
+ versions:
+ - name: v1beta1
+ served: true
+ storage: true
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
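
For the common dynamic case, a namespaced VolumeSnapshot simply names a source
PVC and, optionally, a snapshot class. A minimal sketch with placeholder names:

    kubectl apply -f - <<'EOF'
    apiVersion: snapshot.storage.k8s.io/v1beta1
    kind: VolumeSnapshot
    metadata:
      name: demo-snap
      namespace: default
    spec:
      volumeSnapshotClassName: unity-snapclass
      source:
        persistentVolumeClaimName: demo-pvc
    EOF

    # readiness is reported via the status subresource defined above
    kubectl -n default get volumesnapshot demo-snap \
      -o jsonpath='{.status.readyToUse}'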
diff --git a/dell-csi-helm-installer/common.sh b/dell-csi-helm-installer/common.sh
new file mode 100644
index 00000000..f4b8730e
--- /dev/null
+++ b/dell-csi-helm-installer/common.sh
@@ -0,0 +1,114 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+DRIVERDIR="${SCRIPTDIR}/../helm"
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+DARK_GRAY='\033[1;30m'
+NC='\033[0m' # No Color
+
+function log() {
+ case $1 in
+ separator)
+ echo "------------------------------------------------------"
+ ;;
+ error)
+ echo
+ log separator
+ printf "${RED}Error: $2\n"
+ printf "${RED}Installation cannot continue${NC}\n"
+ exit 1
+ ;;
+ step)
+ printf "|\n|- %-65s" "$2"
+ ;;
+ small_step)
+ printf "%-61s" "$2"
+ ;;
+ section)
+ log separator
+ printf "> %s\n" "$2"
+ log separator
+ ;;
+ smart_step)
+ if [[ $3 == "small" ]]; then
+ log small_step "$2"
+ else
+ log step "$2"
+ fi
+ ;;
+ arrow)
+ printf " %s\n %s" "|" "|--> "
+ ;;
+ step_success)
+ printf "${GREEN}Success${NC}\n"
+ ;;
+ step_failure)
+ printf "${RED}Failed${NC}\n"
+ ;;
+ step_warning)
+ printf "${YELLOW}Warning${NC}\n"
+ ;;
+ info)
+ printf "${DARK_GRAY}%s${NC}\n" "$2"
+ ;;
+ passed)
+ printf "${GREEN}Success${NC}\n"
+ ;;
+ warnings)
+ printf "${YELLOW}Warnings:${NC}\n"
+ ;;
+ errors)
+ printf "${RED}Errors:${NC}\n"
+ ;;
+ *)
+ echo -n "Unknown"
+ ;;
+ esac
+}
+
+function check_error() {
+ if [[ $1 -ne 0 ]]; then
+ log step_failure
+ else
+ log step_success
+ fi
+}
+
+#
+# get_drivers will populate the VALIDDRIVERS array with drivers found by
+# enumerating the directories under helm/ that contain a Chart.yaml
+function get_drivers() {
+ D="${1}"
+ TTT=$(pwd)
+ while read -r line; do
+ DDD=$(echo $line | awk -F '/' '{print $(NF-1)}')
+ VALIDDRIVERS+=("$DDD")
+ done < <(find "${D}" -maxdepth 2 -type f -name Chart.yaml | sort)
+}
+
+#
+# get_release_name will determine the helm release name to use
+# If ${RELEASE} is set, use that
+# Otherwise, use the driver name minus any "csi-" prefix
+# argument 1: Driver name
+function get_release_name() {
+ local D="${1}"
+ if [ ! -z "${RELEASE}" ]; then
+ echo "${RELEASE}"
+ return
+ fi
+
+ local PREFIX="csi-"
+ R=${D#"$PREFIX"}
+ echo "${R}"
+}
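
A sketch of how the installer scripts below consume these helpers: the caller
must set SCRIPTDIR and declare VALIDDRIVERS before sourcing, since common.sh
derives DRIVERDIR from SCRIPTDIR and get_drivers appends to VALIDDRIVERS:

    SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
    declare -a VALIDDRIVERS
    source "$SCRIPTDIR"/common.sh

    get_drivers "${SCRIPTDIR}/../helm"   # e.g. VALIDDRIVERS=("csi-unity")
    RELEASE=""
    get_release_name "csi-unity"         # prints "unity" (strips the "csi-" prefix)
    RELEASE="custom"
    get_release_name "csi-unity"         # prints "custom" (explicit release wins)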
diff --git a/dell-csi-helm-installer/csi-install.sh b/dell-csi-helm-installer/csi-install.sh
new file mode 100755
index 00000000..5445960c
--- /dev/null
+++ b/dell-csi-helm-installer/csi-install.sh
@@ -0,0 +1,391 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+DRIVERDIR="${SCRIPTDIR}/../helm"
+VERIFYSCRIPT="${SCRIPTDIR}/verify.sh"
+SNAPCLASSDIR="${SCRIPTDIR}/beta-snapshot-crd"
+PROG="${0}"
+NODE_VERIFY=1
+VERIFY=1
+MODE="install"
+# whether to install the beta snapshot CRDs. Default is no ("")
+INSTALL_CRD=""
+
+declare -a VALIDDRIVERS
+
+source "$SCRIPTDIR"/common.sh
+
+
+#
+# usage will print command execution help and then exit
+function usage() {
+ echo
+ echo "Help for $PROG"
+ echo
+ echo "Usage: $PROG options..."
+ echo "Options:"
+ echo " Required"
+ echo " --namespace[=] Kubernetes namespace containing the CSI driver"
+ echo " --values[=] Values file, which defines configuration values"
+
+ echo " Optional"
+ echo " --release[=] Name to register with helm, default value will match the driver name"
+ echo " --upgrade Perform an upgrade of the specified driver, default is false"
+ echo " --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root"
+ echo " --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification"
+ echo " --skip-verify-node Skip worker node verification checks"
+ echo " --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes."
+ echo " -h Help"
+ echo
+
+ exit 0
+}
+
+# warning, with an option for users to continue
+function warning() {
+ log separator
+ printf "${YELLOW}WARNING:${NC}\n"
+ for N in "$@"; do
+ echo $N
+ done
+ echo
+ if [ "${ASSUMEYES}" == "true" ]; then
+ echo "Continuing as '-Y' argument was supplied"
+ return
+ fi
+ read -n 1 -p "Press 'y' to continue or any other key to exit: " CONT
+ echo
+ if [ "${CONT}" != "Y" -a "${CONT}" != "y" ]; then
+ echo "quitting at user request"
+ exit 2
+ fi
+}
+
+
+# print header information
+function header() {
+ log section "Installing CSI Driver: ${DRIVER} on ${kMajorVersion}.${kMinorVersion}"
+}
+
+#
+# check_for_driver will see if the driver is already installed within the namespace provided
+function check_for_driver() {
+ log section "Checking to see if CSI Driver is already installed"
+ NUM=$(helm list --namespace "${NS}" | grep "^${RELEASE}\b" | wc -l)
+ if [ "${1}" == "install" -a "${NUM}" != "0" ]; then
+ log error "The CSI Driver is already installed"
+ fi
+ if [ "${1}" == "upgrade" -a "${NUM}" == "0" ]; then
+ log error "The CSI Driver is not installed"
+ fi
+}
+
+#
+# validate_params will validate the parameters passed in
+function validate_params() {
+ # make sure the driver was specified
+ if [ -z "${DRIVER}" ]; then
+ echo "No driver specified"
+ usage
+ exit 1
+ fi
+ # make sure the driver name is valid
+ if [[ ! "${VALIDDRIVERS[@]}" =~ "${DRIVER}" ]]; then
+ echo "Driver: ${DRIVER} is invalid."
+ echo "Valid options are: ${VALIDDRIVERS[@]}"
+ usage
+ exit 1
+ fi
+ # the namespace is required
+ if [ -z "${NS}" ]; then
+ echo "No namespace specified"
+ usage
+ exit 1
+ fi
+ # values file
+ if [ -z "${VALUES}" ]; then
+ echo "No values file was specified"
+ usage
+ exit 1
+ fi
+ if [ ! -f "${VALUES}" ]; then
+ echo "Unable to read values file at: ${VALUES}"
+ usage
+ exit 1
+ fi
+}
+
+#
+# install_driver uses helm to install the driver with a given name
+function install_driver() {
+ if [ "${1}" == "upgrade" ]; then
+ log step "Upgrading Driver"
+ else
+ log step "Installing Driver"
+ fi
+
+ HELMOUTPUT="/tmp/csi-install.$$.out"
+ helm ${1} --values "${DRIVERDIR}/${DRIVER}/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml" --values "${DRIVERDIR}/${DRIVER}/driver-image.yaml" --values "${VALUES}" --namespace ${NS} "${RELEASE}" "${DRIVERDIR}/${DRIVER}" >"${HELMOUTPUT}" 2>&1
+ if [ $? -ne 0 ]; then
+ cat "${HELMOUTPUT}"
+ log error "Helm operation failed, output can be found in ${HELMOUTPUT}. The failure should be examined, before proceeding. Additionally, running csi-uninstall.sh may be needed to clean up partial deployments."
+ fi
+ log step_success
+ # wait for the deployment to finish, use the default timeout
+ waitOnRunning "${NS}" "statefulset ${RELEASE}-controller,daemonset ${RELEASE}-node"
+ if [ $? -eq 1 ]; then
+ warning "Timed out waiting for the operation to complete." \
+ "This does not indicate a fatal error, pods may take a while to start." \
+ "Progress can be checked by running \"kubectl get pods -n ${NS}\""
+ fi
+}
+
+# Print a nice summary at the end
+function summary() {
+ log section "Operation complete"
+}
+
+# waitOnRunning
+# will wait, for a timeout period, for a number of pods to go into Running state within a namespace
+# arguments:
+# $1: required: namespace to watch
+#   $2: required: comma separated list of deployment type and name pairs
+# for example: "statefulset mystatefulset,daemonset mydaemonset"
+# $3: optional: timeout value, 300 seconds is the default.
+function waitOnRunning() {
+ if [ -z "${2}" ]; then
+ echo "No namespace and/or list of deployments was supplied. This field is required for waitOnRunning"
+ return 1
+ fi
+ # namespace
+ local NS="${1}"
+ # pods
+ IFS="," read -r -a PODS <<<"${2}"
+ # timeout value passed in, or 300 seconds as a default
+ local TIMEOUT="300"
+ if [ -n "${3}" ]; then
+ TIMEOUT="${3}"
+ fi
+
+ error=0
+ for D in "${PODS[@]}"; do
+ log arrow
+ log smart_step "Waiting for $D to be ready" "small"
+ kubectl -n "${NS}" rollout status --timeout=${TIMEOUT}s ${D} >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ error=1
+ log step_failure
+ else
+ log step_success
+ fi
+ done
+
+ if [ $error -ne 0 ]; then
+ return 1
+ fi
+ return 0
+}
+
+function kubectl_safe() {
+ eval "kubectl $1"
+ exitcode=$?
+ if [[ $exitcode != 0 ]]; then
+ echo "$2"
+ exit $exitcode
+ fi
+}
+
+#
+# install_snapshot_crd
+# Installs the beta snapshot CRDs from the local beta-snapshot-crd directory
+function install_snapshot_crd() {
+ if [ "${INSTALL_CRD}" == "" ]; then
+ return
+ fi
+ log step "Checking and installing snapshot crds"
+
+ declare -A SNAPCLASSES=(
+ ["volumesnapshotclasses"]="snapshot.storage.k8s.io_volumesnapshotclasses.yaml"
+ ["volumesnapshotcontents"]="snapshot.storage.k8s.io_volumesnapshotcontents.yaml"
+ ["volumesnapshots"]="snapshot.storage.k8s.io_volumesnapshots.yaml"
+ )
+
+ for C in "${!SNAPCLASSES[@]}"; do
+ F="${SNAPCLASSES[$C]}"
+ # check if custom resource exists
+ kubectl_safe "get customresourcedefinitions" "Failed to get crds" | grep "${C}" --quiet
+
+ if [[ $? -ne 0 ]]; then
+ # make sure CRD exists
+ if [ ! -f "${SNAPCLASSDIR}/${SNAPCLASSES[$C]}" ]; then
+ echo "Unable to to find Snapshot Classes at ${SNAPCLASSDIR}"
+ exit 1
+ fi
+ # create the custom resource
+ kubectl_safe "create -f ${SNAPCLASSDIR}/${SNAPCLASSES[$C]}" "Failed to create Volume Snapshot Beta CRD: ${C}"
+ fi
+ done
+
+ sleep 10s
+ log step_success
+}
+
+#
+# verify_kubernetes
+# will run a driver specific function to verify environmental requirements
+function verify_kubernetes() {
+ EXTRA_OPTS=""
+ if [ $VERIFY -eq 0 ]; then
+ echo "Skipping verification at user request"
+ else
+ if [ $NODE_VERIFY -eq 0 ]; then
+ EXTRA_OPTS="$EXTRA_OPTS --skip-verify-node"
+ fi
+ if [ "${INSTALL_CRD}" == "yes" ]; then
+ EXTRA_OPTS="$EXTRA_OPTS --snapshot-crd"
+ fi
+ "${VERIFYSCRIPT}" --namespace "${NS}" --release "${RELEASE}" --values "${VALUES}" --node-verify-user "${NODEUSER}" ${EXTRA_OPTS}
+ VERIFYRC=$?
+ case $VERIFYRC in
+ 0) ;;
+
+ 1)
+ warning "Kubernetes validation failed but installation can continue. " \
+ "This may affect driver installation."
+ ;;
+ *)
+ log error "Kubernetes validation failed."
+ ;;
+ esac
+ fi
+}
+
+#
+# main
+#
+VERIFYOPTS=""
+ASSUMEYES="false"
+
+# get the list of valid CSI Drivers; this will be the list of directories in helm/ that contain helm charts
+get_drivers "${DRIVERDIR}"
+# if only one driver was found, set the DRIVER to that one
+if [ ${#VALIDDRIVERS[@]} -eq 1 ]; then
+ DRIVER="${VALIDDRIVERS[0]}"
+fi
+
+while getopts ":h-:" optchar; do
+ case "${optchar}" in
+ -)
+ case "${OPTARG}" in
+ skip-verify)
+ VERIFY=0
+ ;;
+ skip-verify-node)
+ NODE_VERIFY=0
+ ;;
+ # SNAPSHOT_CRD
+ snapshot-crd)
+ INSTALL_CRD="yes"
+ ;;
+ upgrade)
+ MODE="upgrade"
+ ;;
+ # NAMESPACE
+ namespace)
+ NS="${!OPTIND}"
+ if [[ -z ${NS} || ${NS} == "--skip-verify" ]]; then
+ NS=${DEFAULT_NS}
+ else
+ OPTIND=$((OPTIND + 1))
+ fi
+ ;;
+ namespace=*)
+ NS=${OPTARG#*=}
+ if [[ -z ${NS} ]]; then NS=${DEFAULT_NS}; fi
+ ;;
+ # RELEASE
+ release)
+ RELEASE="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ release=*)
+ RELEASE=${OPTARG#*=}
+ ;;
+ # VALUES
+ values)
+ VALUES="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ values=*)
+ VALUES=${OPTARG#*=}
+ ;;
+ # NODEUSER
+ node-verify-user)
+ NODEUSER="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ node-verify-user=*)
+        NODEUSER=${OPTARG#*=}
+ ;;
+ *)
+ echo "Unknown option --${OPTARG}"
+ echo "For help, run $PROG -h"
+ exit 1
+ ;;
+ esac
+ ;;
+ h)
+ usage
+ ;;
+ *)
+ echo "Unknown option -${OPTARG}"
+ echo "For help, run $PROG -h"
+ exit 1
+ ;;
+ esac
+done
+
+# by default the NAME of the helm release of the driver is the same as the driver name
+RELEASE=$(get_release_name "${DRIVER}")
+# by default, NODEUSER is root
+NODEUSER="${NODEUSER:-root}"
+
+# make sure kubectl is available
+kubectl --help >&/dev/null || {
+ echo "kubectl required for installation... exiting"
+ exit 2
+}
+# make sure helm is available
+helm --help >&/dev/null || {
+ echo "helm required for installation... exiting"
+ exit 2
+}
+
+# Get the kubernetes major and minor version numbers.
+kMajorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Major:"//' -e 's/[^0-9].*//g')
+kMinorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Minor:"//' -e 's/[^0-9].*//g')
+
+# validate the parameters passed in
+validate_params "${MODE}"
+
+header
+check_for_driver "${MODE}"
+verify_kubernetes
+
+if [[ "${INSTALL_CRD}" != "" ]]; then
+ install_snapshot_crd
+fi
+
+
+# all good, keep processing
+install_driver "${MODE}"
+
+summary
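
Typical invocations of this script (namespace and values file are placeholders;
the values file must exist and the namespace must already be created):

    # fresh install, also installing the beta snapshot CRDs
    ./csi-install.sh --namespace unity --values ./myvalues.yaml --snapshot-crd

    # upgrade an existing release, skipping the SSH-based worker node checks
    ./csi-install.sh --namespace unity --values ./myvalues.yaml \
      --upgrade --skip-verify-node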
diff --git a/dell-csi-helm-installer/csi-uninstall.sh b/dell-csi-helm-installer/csi-uninstall.sh
new file mode 100755
index 00000000..e3e8e5cf
--- /dev/null
+++ b/dell-csi-helm-installer/csi-uninstall.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+DRIVERDIR="${SCRIPTDIR}/../helm"
+PROG="${0}"
+
+declare -a VALIDDRIVERS
+
+source "$SCRIPTDIR"/common.sh
+
+#
+# usage will print command execution help and then exit
+function usage() {
+ echo "Help for $PROG"
+ echo
+ echo "Usage: $PROG options..."
+ echo "Options:"
+ echo " Required"
+ echo " --namespace[=] Kubernetes namespace to uninstall the CSI driver from"
+
+ echo " Optional"
+ echo " --release[=] Name to register with helm, default value will match the driver name"
+ echo " -h Help"
+ echo
+
+ exit 0
+}
+
+
+
+#
+# validate_params will validate the parameters passed in
+function validate_params() {
+ # make sure the driver was specified
+ if [ -z "${DRIVER}" ]; then
+ echo "No driver specified"
+ exit 1
+ fi
+ # make sure the driver name is valid
+ if [[ ! "${VALIDDRIVERS[@]}" =~ "${DRIVER}" ]]; then
+ echo "Driver: ${DRIVER} is invalid."
+ echo "Valid options are: ${VALIDDRIVERS[@]}"
+ exit 1
+ fi
+ # the namespace is required
+ if [ -z "${NAMESPACE}" ]; then
+ echo "No namespace specified"
+ exit 1
+ fi
+}
+
+
+# check_for_driver will see if the driver is installed within the namespace provided
+function check_for_driver() {
+ NUM=$(helm list --namespace "${NAMESPACE}" | grep "^${RELEASE}\b" | wc -l)
+ if [ "${NUM}" == "0" ]; then
+ echo "The CSI Driver is not installed."
+ exit 1
+ fi
+}
+
+# get the list of valid CSI Drivers; this will be the list of directories in helm/ that contain helm charts
+get_drivers "${DRIVERDIR}"
+# if only one driver was found, set the DRIVER to that one
+if [ ${#VALIDDRIVERS[@]} -eq 1 ]; then
+ DRIVER="${VALIDDRIVERS[0]}"
+fi
+
+while getopts ":h-:" optchar; do
+ case "${optchar}" in
+ -)
+ case "${OPTARG}" in
+ # NAMESPACE
+ namespace)
+ NAMESPACE="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ namespace=*)
+ NAMESPACE=${OPTARG#*=}
+ ;;
+ # RELEASE
+ release)
+ RELEASE="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ release=*)
+ RELEASE=${OPTARG#*=}
+ ;;
+ *)
+ echo "Unknown option --${OPTARG}"
+ echo "For help, run $PROG -h"
+ exit 1
+ ;;
+ esac
+ ;;
+ h)
+ usage
+ ;;
+ *)
+ echo "Unknown option -${OPTARG}"
+ echo "For help, run $PROG -h"
+ exit 1
+ ;;
+ esac
+done
+
+# by default the NAME of the helm release of the driver is the same as the driver name
+RELEASE=$(get_release_name "${DRIVER}")
+
+# validate the parameters passed in
+validate_params
+
+check_for_driver
+helm delete -n "${NAMESPACE}" "${RELEASE}"
+if [ $? -ne 0 ]; then
+ echo "Removal of the CSI Driver was unsuccessful"
+ exit 1
+fi
+
+echo "Removal of the CSI Driver is in progress."
+echo "It may take a few minutes for all pods to terminate."
+
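Example usage, with placeholder names (the release defaults to the driver name
minus the "csi-" prefix, so the namespace is usually the only required option):

    ./csi-uninstall.sh --namespace unity
    ./csi-uninstall.sh --namespace unity --release custom   # non-default release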
diff --git a/dell-csi-helm-installer/verify.sh b/dell-csi-helm-installer/verify.sh
new file mode 100755
index 00000000..4ec05f84
--- /dev/null
+++ b/dell-csi-helm-installer/verify.sh
@@ -0,0 +1,605 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+PROG="${0}"
+source "$SCRIPTDIR"/common.sh
+
+declare -a VALIDDRIVERS
+
+# verify-csi-powermax method
+function verify-csi-powermax() {
+ verify_k8s_versions "1" "17" "1" "19"
+ verify_namespace "${NS}"
+ verify_required_secrets "${RELEASE}-creds"
+ verify_optional_secrets "${RELEASE}-certs"
+ verify_optional_secrets "csirevproxy-tls-secret"
+ verify_alpha_snap_resources
+ verify_beta_snap_requirements
+ verify_iscsi_installation
+ verify_helm_3
+}
+
+#
+# verify-csi-isilon method
+function verify-csi-isilon() {
+ verify_k8s_versions "1" "17" "1" "19"
+ verify_namespace "${NS}"
+ verify_required_secrets "${RELEASE}-creds"
+ verify_optional_secrets "${RELEASE}-certs"
+ verify_alpha_snap_resources
+ verify_beta_snap_requirements
+ verify_helm_3
+}
+
+#
+# verify-csi-vxflexos method
+function verify-csi-vxflexos() {
+ verify_k8s_versions "1" "17" "1" "19"
+ verify_namespace "${NS}"
+ verify_required_secrets "${RELEASE}-creds"
+ verify_sdc_installation
+ verify_alpha_snap_resources
+ verify_beta_snap_requirements
+ verify_helm_3
+}
+
+# verify-csi-powerstore method
+function verify-csi-powerstore() {
+ verify_k8s_versions "1" "17" "1" "19"
+ verify_namespace "${NS}"
+ verify_required_secrets "${RELEASE}-creds"
+ verify_alpha_snap_resources
+ verify_beta_snap_requirements
+ verify_powerstore_node_configuration
+ verify_helm_3
+}
+
+# verify-csi-unity method
+function verify-csi-unity() {
+ verify_k8s_versions "1" "17" "1" "19"
+ verify_namespace "${NS}"
+ verify_required_secrets "${RELEASE}-creds"
+ verify_required_secrets "${RELEASE}-certs-0"
+ verify_alpha_snap_resources
+ verify_beta_snap_requirements
+ verify_helm_3
+}
+
+#
+# verify-driver will call the proper method to verify a specific driver
+function verify-driver() {
+ if [ -z "${1}" ]; then
+ echo "Expected one argument, the driver name, to verify-driver. Received none."
+ exit $EXIT_ERROR
+ fi
+ local D="${1}"
+ # check if a verify-$DRIVER function exists
+ # if not, error and exit
+ # if yes, check to see if it should be run and run it
+ FNTYPE=$(type -t verify-$D)
+ if [ "$FNTYPE" != "function" ]; then
+ echo "ERROR: verify-$D function does not exist"
+ exit $EXIT_ERROR
+ else
+ header
+ log step "Driver: ${D}"
+ echo
+ verify-$D
+ summary
+ fi
+}
+
+# Print usage information
+function usage() {
+ echo
+ echo "Help for $PROG"
+ echo
+ echo "Usage: $PROG options..."
+ echo "Options:"
+ echo " Required"
+ echo " --namespace[=] Kubernetes namespace to install the CSI driver"
+ echo " --values[=] Values file, which defines configuration values"
+
+ echo " Optional"
+ echo " --skip-verify-node Skip worker node verification checks"
+ echo " --release[=] Name to register with helm, default value will match the driver name"
+ echo " --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root"
+ echo " --snapshot-crd Signifies that the Snapshot CRDs will be installed as part of installation."
+ echo " -h Help"
+ echo
+
+ exit $EXIT_WARNING
+}
+
+# print header information
+function header() {
+ log section "Verifying Kubernetes and driver configuration"
+ echo "|- Kubernetes Version: ${kMajorVersion}.${kMinorVersion}"
+}
+
+# Check if the SDC is installed and the kernel module loaded
+function verify_sdc_installation() {
+ if [ ${NODE_VERIFY} -eq 0 ]; then
+ return
+ fi
+ log step "Verifying the SDC installation"
+
+ error=0
+ missing=()
+ for node in $MINION_NODES; do
+ # check is the scini kernel module is loaded
+ ssh ${NODEUSER}@$node "/sbin/lsmod | grep scini" >/dev/null 2>&1
+ rv=$?
+ if [ $rv -ne 0 ]; then
+ missing+=($node)
+ error=1
+ found_warning "SDC was not found on node: $node"
+ fi
+ done
+ check_error error
+}
+
+function verify_powerstore_node_configuration() {
+ if [ ${NODE_VERIFY} -eq 0 ]; then
+ return
+ fi
+
+ log step "Verifying PowerStore node configuration"
+ echo
+
+ if ls "${VALUES}" >/dev/null; then
+ if grep -c "scsiProtocol:[[:blank:]]\+FC" "${VALUES}" >/dev/null; then
+ log arrow
+ verify_fc_installation
+ elif grep -c "scsiProtocol:[[:blank:]]\+ISCSI" "${VALUES}" >/dev/null; then
+ log arrow
+ verify_iscsi_installation "small"
+ elif grep -c "scsiProtocol:[[:blank:]]\+auto" "${VALUES}" >/dev/null; then
+ log arrow
+ verify_iscsi_installation "small"
+ log arrow
+ verify_fc_installation "small"
+ elif grep -c "scsiProtocol:[[:blank:]]\+None" "${VALUES}" >/dev/null; then
+ log step_warning
+ found_warning "Neither FC nor iSCSI connection is activated, please be sure that NFS settings are correct"
+ else
+ log step_failure
+ found_error "Incorrect scsiProtocol value, must be 'FC', 'ISCSI', 'auto' or 'None'"
+ fi
+ else
+ log step_failure
+ found_error "${VALUES} doesn't exists"
+ fi
+}
+
+# Check if the iSCSI client is installed
+function verify_iscsi_installation() {
+ if [ ${NODE_VERIFY} -eq 0 ]; then
+ return
+ fi
+
+ log smart_step "Verifying iSCSI installation" "$1"
+
+ error=0
+ for node in $MINION_NODES; do
+ # check if the iSCSI client is installed
+ ssh ${NODEUSER}@"${node}" "cat /etc/iscsi/initiatorname.iscsi" >/dev/null 2>&1
+ rv=$?
+ if [ $rv -ne 0 ]; then
+ error=1
+ found_warning "iSCSI client was not found on node: $node"
+ fi
+ ssh ${NODEUSER}@"${node}" pgrep iscsid &>/dev/null
+ rv=$?
+ if [ $rv -ne 0 ]; then
+ error=1
+ found_warning "iscsid is not running on node: $node"
+ fi
+ done
+
+ check_error error
+}
+
+# Check if FC hosts are configured
+function verify_fc_installation() {
+ if [ ${NODE_VERIFY} -eq 0 ]; then
+ return
+ fi
+
+ log smart_step "Verifying FC installation" "$1"
+
+ error=0
+ for node in $MINION_NODES; do
+ # check if FC hosts are available
+ ssh ${NODEUSER}@${node} 'ls --hide=* /sys/class/fc_host/* 1>/dev/null' &>/dev/null
+ rv=$?
+ if [[ ${rv} -ne 0 ]]; then
+ error=1
+ found_warning "can't find any FC hosts on node: $node"
+ fi
+ done
+
+ check_error error
+}
+
+# verify secrets exist
+function verify_required_secrets() {
+ log step "Verifying that required secrets have been created"
+
+ error=0
+ for N in "${@}"; do
+ # Make sure the secret has already been established
+ kubectl get secrets -n "${NS}" 2>/dev/null | grep "${N}" --quiet
+ if [ $? -ne 0 ]; then
+ error=1
+ found_error "Required secret, ${N}, does not exist."
+ fi
+ done
+ check_error error
+}
+
+function verify_optional_secrets() {
+ log step "Verifying that optional secrets have been created"
+
+ error=0
+ for N in "${@}"; do
+ # Make sure the secret has already been established
+ kubectl get secrets -n "${NS}" 2>/dev/null | grep "${N}" --quiet
+ if [ $? -ne 0 ]; then
+ error=1
+ found_warning "Optional secret, ${N}, does not exist."
+ fi
+ done
+ check_error error
+}
+
+# verify minimum and maximum k8s versions
+function verify_k8s_versions() {
+ log step "Verifying Kubernetes versions"
+ echo
+ log arrow
+ verify_min_k8s_version "$1" "$2" "small"
+ log arrow
+ verify_max_k8s_version "$3" "$4" "small"
+}
+
+# verify minimum k8s version
+function verify_min_k8s_version() {
+ log smart_step "Verifying minimum Kubernetes version" "$3"
+
+  error=0
+  # flag an error when the major version is too old, or the majors match
+  # and the minor version is too old (avoids a false error when the major
+  # version alone already satisfies the minimum)
+  if [[ "${1}" -gt "${kMajorVersion}" || ( "${1}" -eq "${kMajorVersion}" && "${2}" -gt "${kMinorVersion}" ) ]]; then
+    error=1
+    found_error "Kubernetes version, ${kMajorVersion}.${kMinorVersion}, is too old. Minimum required version is: ${1}.${2}"
+  fi
+
+ check_error error
+}
+
+# verify maximum k8s version
+function verify_max_k8s_version() {
+ log smart_step "Verifying maximum Kubernetes version" "$3"
+
+  error=0
+  # warn when the major version is newer than tested, or the majors match
+  # and the minor version is newer than tested
+  if [[ "${1}" -lt "${kMajorVersion}" || ( "${1}" -eq "${kMajorVersion}" && "${2}" -lt "${kMinorVersion}" ) ]]; then
+    error=1
+    found_warning "Kubernetes version, ${kMajorVersion}.${kMinorVersion}, is newer than has been tested. Last tested version is: ${1}.${2}"
+  fi
+
+ check_error error
+}
+
+# verify namespace
+function verify_namespace() {
+ log step "Verifying that required namespaces have been created"
+
+ error=0
+ for N in "${@}"; do
+ # Make sure the namespace exists
+ kubectl describe namespace "${N}" >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ error=1
+ found_error "Namespace does not exist: ${N}"
+ fi
+ done
+
+ check_error error
+}
+
+# verify that no alpha version of the volume snapshot resources is present on the system
+function verify_alpha_snap_resources() {
+ log step "Verifying alpha snapshot resources"
+ echo
+ log arrow
+ log smart_step "Verifying that alpha snapshot CRDs are not installed" "small"
+
+ error=0
+  # check for the alpha snapshot CRDs; these must not be present for installation to proceed
+ CRDS=("VolumeSnapshotClasses" "VolumeSnapshotContents" "VolumeSnapshots")
+ for C in "${CRDS[@]}"; do
+ # Verify that alpha snapshot related CRDs/CRs are not there on the system.
+ kubectl explain ${C} 2> /dev/null | grep "^VERSION.*v1alpha1$" --quiet
+ if [ $? -eq 0 ]; then
+ error=1
+ found_error "The alhpa CRD for ${C} is installed. Please uninstall it"
+ if [[ $(kubectl get ${C} -A --no-headers 2>/dev/null | wc -l) -ne 0 ]]; then
+ found_error " Found CR for alpha CRD ${C}. Please delete it"
+ fi
+ fi
+ done
+ check_error error
+}
+
+# verify that the requirements for beta snapshot support exist
+function verify_beta_snap_requirements() {
+ log step "Verifying beta snapshot support"
+ echo
+ log arrow
+ log smart_step "Verifying that beta snapshot CRDs are available" "small"
+
+ error=0
+ # check for the CRDs. These are required for installation
+ CRDS=("VolumeSnapshotClasses" "VolumeSnapshotContents" "VolumeSnapshots")
+ for C in "${CRDS[@]}"; do
+ # Verify if snapshot related CRDs are there on the system. If not install them.
+ kubectl explain ${C} 2> /dev/null | grep "^VERSION.*v1beta1$" --quiet
+ if [ $? -ne 0 ]; then
+ error=1
+ if [ "${INSTALL_CRD}" == "yes" ]; then
+ found_warning "The beta CRD for ${C} is not installed. They will be installed because --snapshot-crd was specified"
+ else
+ found_error "The beta CRD for ${C} is not installed. These can be installed by specifying --snapshot-crd during installation"
+ fi
+ fi
+ done
+ check_error error
+
+ log arrow
+ log smart_step "Verifying that beta snapshot controller is available" "small"
+
+ error=0
+ # check for the snapshot-controller. These are strongly suggested but not required
+ kubectl get pods -A | grep snapshot-controller --quiet
+ if [ $? -ne 0 ]; then
+ error=1
+ found_warning "The Snapshot Controller does not seem to be deployed. The Snapshot Controller should be provided by the Kubernetes vendor or administrator."
+ fi
+
+ check_error error
+}
+
+# verify that helm is v3 or above
+function verify_helm_3() {
+ log step "Verifying helm version"
+
+ error=0
+ # Check helm installer version
+ helm --help >&/dev/null || {
+ found_error "helm is required for installation"
+ log step_failure
+ return
+ }
+
+ helm version | grep "v3." --quiet
+ if [ $? -ne 0 ]; then
+ error=1
+ found_error "Driver installation is supported only using helm 3"
+ fi
+
+ check_error error
+}
+
+# found_error, installation will not continue
+function found_error() {
+ for N in "$@"; do
+ ERRORS+=("${N}")
+ done
+}
+
+# found_warning, installation can continue
+function found_warning() {
+ for N in "$@"; do
+ WARNINGS+=("${N}")
+ done
+}
+
+# Print a nice summary at the end
+function summary() {
+  # default to success; downgraded below if warnings or errors were found
+  RC=$EXIT_SUCCESS
+  echo
+ log section "Verification Complete"
+ # print all the WARNINGS
+ NON_CRD_WARNINGS=0
+ if [ "${#WARNINGS[@]}" -ne 0 ]; then
+ log warnings
+ for E in "${WARNINGS[@]}"; do
+ echo "- ${E}"
+ echo ${E} | grep --quiet "^The beta CRD for VolumeSnapshot"
+ if [ $? -ne 0 ]; then
+ NON_CRD_WARNINGS=1
+ fi
+ done
+ RC=$EXIT_WARNING
+ if [ "${INSTALL_CRD}" == "yes" -a ${NON_CRD_WARNINGS} -eq 0 ]; then
+ RC=$EXIT_SUCCESS
+ fi
+ fi
+
+ # print all the ERRORS
+ if [ "${#ERRORS[@]}" -ne 0 ]; then
+ log errors
+ for E in "${ERRORS[@]}"; do
+ echo "- ${E}"
+ done
+ RC=$EXIT_ERROR
+ fi
+
+ return $RC
+}
+
+#
+# validate_params will validate the parameters passed in
+function validate_params() {
+ # make sure the driver was specified
+ if [ -z "${DRIVER}" ]; then
+ echo "No driver specified"
+ usage
+ exit 1
+ fi
+ # make sure the driver name is valid
+ if [[ ! "${VALIDDRIVERS[@]}" =~ "${DRIVER}" ]]; then
+ echo "Driver: ${DRIVER} is invalid."
+ echo "Valid options are: ${VALIDDRIVERS[@]}"
+ usage
+ exit 1
+ fi
+ # the namespace is required
+ if [ -z "${NS}" ]; then
+ echo "No namespace specified"
+ usage
+ exit 1
+ fi
+ # values file
+ if [ -z "${VALUES}" ]; then
+ echo "No values file was specified"
+ usage
+ exit 1
+ fi
+ if [ ! -f "${VALUES}" ]; then
+ echo "Unable to read values file at: ${VALUES}"
+ usage
+ exit 1
+ fi
+}
+
+#
+# main
+#
+# default values
+
+NODE_VERIFY=1
+
+# exit codes
+EXIT_SUCCESS=0
+EXIT_WARNING=1
+EXIT_ERROR=99
+
+# arrays of messages
+WARNINGS=()
+ERRORS=()
+
+INSTALL_CRD="no"
+
+# make sure kubectl is available
+kubectl --help >&/dev/null || {
+ echo "kubectl required for verification... exiting"
+ exit $EXIT_ERROR
+}
+
+# Determine the nodes
+MINION_NODES=$(kubectl get nodes -o wide | grep -v -e master -e INTERNAL | awk ' { print $6; }')
+MASTER_NODES=$(kubectl get nodes -o wide | awk ' /master/{ print $6; }')
+# Get the kubernetes major and minor version numbers.
+kMajorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Major:"//' -e 's/[^0-9].*//g')
+kMinorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Minor:"//' -e 's/[^0-9].*//g')
+
+# get the list of valid CSI Drivers; this will be the list of directories in helm/ that contain helm charts
+get_drivers "${SCRIPTDIR}/../helm"
+# if only one driver was found, set the DRIVER to that one
+if [ ${#VALIDDRIVERS[@]} -eq 1 ]; then
+ DRIVER="${VALIDDRIVERS[0]}"
+fi
+
+while getopts ":h-:" optchar; do
+ case "${optchar}" in
+ -)
+ case "${OPTARG}" in
+ # INSTALL_CRD. Signifies that we were asked to install the CRDs
+ snapshot-crd)
+ INSTALL_CRD="yes"
+ ;;
+ skip-verify-node)
+ NODE_VERIFY=0
+ ;;
+ # NAMESPACE
+ namespace)
+ NS="${!OPTIND}"
+ if [[ -z ${NS} || ${NS} == "--skip-verify" ]]; then
+ NS=${DEFAULT_NS}
+ else
+ OPTIND=$((OPTIND + 1))
+ fi
+ ;;
+ namespace=*)
+ NS=${OPTARG#*=}
+ if [[ -z ${NS} ]]; then NS=${DEFAULT_NS}; fi
+ ;;
+ # RELEASE
+ release)
+ RELEASE="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ release=*)
+ RELEASE=${OPTARG#*=}
+ ;;
+ # VALUES
+ values)
+ VALUES="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ values=*)
+ VALUES=${OPTARG#*=}
+ ;;
+ # NODEUSER
+ node-verify-user)
+ NODEUSER="${!OPTIND}"
+ OPTIND=$((OPTIND + 1))
+ ;;
+ node-verify-user=*)
+        NODEUSER=${OPTARG#*=}
+ ;;
+ *)
+ echo "Unknown option --${OPTARG}"
+ echo "For help, run $PROG -h"
+ exit $EXIT_ERROR
+ ;;
+ esac
+ ;;
+ h)
+ usage
+ ;;
+ *)
+ echo "Unknown option -${OPTARG}"
+ echo "For help, run $PROG -h"
+ exit $EXIT_ERROR
+ ;;
+ esac
+done
+
+# by default the NAME of the helm release of the driver is the same as the driver name
+RELEASE=$(get_release_name "${DRIVER}")
+
+#"${RELEASE:-$DRIVER}"
+# by default, NODEUSER is root
+NODEUSER="${NODEUSER:-root}"
+
+# validate the parameters passed in
+validate_params "${MODE}"
+
+verify-driver "${DRIVER}"
+exit $?
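
The verifier can also be run standalone ahead of an install; the exit code maps
to the constants above (0 success, 1 warnings only, 99 errors). Names below are
placeholders:

    ./verify.sh --namespace unity --values ./myvalues.yaml --node-verify-user root
    echo $?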
diff --git a/docker.mk b/docker.mk
deleted file mode 100644
index 29e27ca7..00000000
--- a/docker.mk
+++ /dev/null
@@ -1,36 +0,0 @@
-# Includes the following generated file to get semantic version information
-include semver.mk
-ifdef NOTES
- RELNOTE="-$(NOTES)"
-else
- RELNOTE=
-endif
-
-# local build, use user and timestamp it
-NAME:=csi-unity
-DOCKER_IMAGE_NAME ?= ${NAME}-${USER}
-VERSION:=$(shell date +%Y%m%d%H%M%S)
-BIN_DIR:=bin
-BIN_NAME:=${NAME}
-DOCKER_REPO ?= amaas-eos-mw1.cec.lab.emc.com:5028
-DOCKER_NAMESPACE ?= csi-unity
-DOCKER_IMAGE_TAG ?= ${VERSION}
-
-.PHONY: docker-build
-docker-build:
- echo ${VERSION} ${GITLAB_CI} ${CI_COMMIT_TAG} ${CI_COMMIT_SHA}
- rm -f core/core_generated.go
- cd core && go generate
- go run core/semver/semver.go -f mk >semver.mk
- mkdir -p ${BIN_DIR}
- GOOS=linux CGO_ENABLED=0 GOARCH=amd64 go build -ldflags '-extldflags "-static"' -o ${BIN_DIR}/${BIN_NAME}
- docker build -t ${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_TAG} .
- docker tag ${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_TAG} ${DOCKER_REPO}/${DOCKER_NAMESPACE}/${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_TAG}
-
-.PHONY: docker-push
-docker-push: docker-build
- docker push ${DOCKER_REPO}/${DOCKER_NAMESPACE}/${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_TAG}
-
-version:
- @echo "MAJOR $(MAJOR) MINOR $(MINOR) PATCH $(PATCH) BUILD ${BUILD} TYPE ${TYPE} RELNOTE $(RELNOTE) SEMVER $(SEMVER)"
- @echo "Target Version: $(VERSION)"
diff --git a/env.sh b/env.sh
index 3dc8f5ae..07396649 100644
--- a/env.sh
+++ b/env.sh
@@ -1,16 +1,11 @@
#!/bin/sh
-export X_CSI_UNITY_ENDPOINT=https://0.0.0.0
-export X_CSI_UNITY_USER=
-export X_CSI_UNITY_PASSWORD=
-export X_CSI_UNITY_INSECURE=true
export X_CSI_UNITY_NODENAME=
-export X_CSI_PRIVATE_MOUNT_DIR=
export X_CSI_STAGING_TARGET_PATH=
export X_CSI_PUBLISH_TARGET_PATH=
export CSI_ENDPOINT=
export X_CSI_DEBUG=
export STORAGE_POOL=
-export STORAGE_POOL_NAME=
+export NAS_SERVER=
export X_CSI_REQ_LOGGING="true"
export X_CSI_REP_LOGGING="true"
export GOUNITY_DEBUG="true"
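
A hedged sketch of exercising env.sh during local development (the socket path
is a placeholder; array credentials are now supplied via the driver secret
rather than environment variables):

    source ./env.sh
    export CSI_ENDPOINT=unix:///tmp/csi.sock   # placeholder gRPC endpoint
    ./bin/csi-unity                            # binary from the builder stage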
diff --git a/go.mod b/go.mod
index 9502ebb4..04bcab7c 100644
--- a/go.mod
+++ b/go.mod
@@ -6,17 +6,14 @@ require (
github.com/DATA-DOG/godog v0.7.13
github.com/container-storage-interface/spec v1.1.0
github.com/dell/gobrick v1.0.0
- github.com/dell/gofsutil v1.2.0
+ github.com/dell/gofsutil v1.3.0
github.com/dell/goiscsi v1.1.0
- github.com/dell/gounity v1.2.1
+ github.com/dell/gounity v1.3.0
github.com/fsnotify/fsnotify v1.4.9
- github.com/golang/protobuf v1.3.2
+ github.com/golang/protobuf v1.4.2
github.com/rexray/gocsi v1.1.0
- github.com/sirupsen/logrus v1.4.2
+ github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.4.0
- golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 // indirect
- golang.org/x/net v0.0.0-20190620200207-3b0461eec859
- golang.org/x/text v0.3.2 // indirect
- google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect
- google.golang.org/grpc v1.21.1
+ golang.org/x/net v0.0.0-20200625001655-4c5254603344
+ google.golang.org/grpc v1.26.0
)
diff --git a/go.sum b/go.sum
index 526801df..eafa7367 100644
--- a/go.sum
+++ b/go.sum
@@ -1,17 +1,44 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DATA-DOG/godog v0.7.13 h1:JmgpKcra7Vf3yzI9vPsWyoQRx13tyKziHtXWDCUUgok=
github.com/DATA-DOG/godog v0.7.13/go.mod h1:z2OZ6a3X0/YAKVqLfVzYBwFt3j6uSt3Xrqa7XTtcQE0=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0=
github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs=
github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s=
@@ -19,9 +46,14 @@ github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93C
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190612170431-362f06ec6bc1/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -29,23 +61,46 @@ github.com/dell/gobrick v1.0.0 h1:/l5uRqFxmrTBnNtswj3jm1uSsBxUK0y0RqtzhpoO8g0=
github.com/dell/gobrick v1.0.0/go.mod h1:pYN9r3XvIjct/QkEg5mEM+k2L3QBLVaHUZeAMzQgwRg=
github.com/dell/gofsutil v1.2.0 h1:FLvXjNm/mA7y0zpIVgzU61wYIJmNd4VNj8PuY7AR2kw=
github.com/dell/gofsutil v1.2.0/go.mod h1:48eHpMRl0+07uGEnQ7/RE6pTOAVEl74utlGjd0QX/Os=
-github.com/dell/goiscsi v1.0.0 h1:z3hXDIUiVYrrqksWJfJ1mI8Gx6mhQrBidi/LAJwIAyM=
-github.com/dell/goiscsi v1.0.0/go.mod h1:MfuMjbKWsh/MOb0VDW20C+LFYRIOfWKGiAxWkeM5TKo=
-github.com/dell/goiscsi v1.1.0 h1:ByULW5hzYnDLCEC/7E2B03KhTLt/RpeMIwfGszmTe7U=
+github.com/dell/gofsutil v1.3.0 h1:6iDzLAdvrusB5p1yxsW45D2bC9+PUX64tJhH3tgGBN8=
+github.com/dell/gofsutil v1.3.0/go.mod h1:48eHpMRl0+07uGEnQ7/RE6pTOAVEl74utlGjd0QX/Os=
+github.com/dell/goiscsi v1.1.0 h1:0/eHKWhEjuk1AFfyBtiOqY4acW1HrC7FUyB++n2S520=
github.com/dell/goiscsi v1.1.0/go.mod h1:MfuMjbKWsh/MOb0VDW20C+LFYRIOfWKGiAxWkeM5TKo=
-github.com/dell/gounity v1.2.1 h1:HI7co+i5EYYykpwMmbTRw8bQfoO4+61ZgabijPh+fug=
-github.com/dell/gounity v1.2.1/go.mod h1:es7wIQepk+bz5UgDS5Foxv37bCf/0hW5OKAWovBnNp4=
+github.com/dell/gounity v1.3.0 h1:jU8enKbzkizunYA1GLO+XuOB4I644IkDGG1VUx2DFWo=
+github.com/dell/gounity v1.3.0/go.mod h1:FLT3tB1iqGQmK7TxmJZ4Qkfb7faZoaXs9c4gtuBlSTo=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -53,51 +108,189 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.12.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rexray/gocsi v1.1.0 h1:MkstGTZ1x4uf9AtwhOwzovYYYkPM5ZCRFU8ek9+rAy0=
github.com/rexray/gocsi v1.1.0/go.mod h1:kr6L70GxUU6Gu8ehq2dWQmwdILR1tmE05c/OYaTvlx0=
github.com/rexray/gocsi v1.2.1 h1:9e15bmlOLxgEVi2MyruU0dxLotULoE4g/zimPsqEkEM=
github.com/rexray/gocsi v1.2.1/go.mod h1:5V3YEu+6P8HFTSzUjldYM1abIb/4mLnW/3qWzu8yD0Y=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -106,67 +299,152 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/thecodeteam/gosync v0.1.0/go.mod h1:43QHsngcnWc8GE1aCmi7PEypslflHjCzXFleuWKEb00=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20171023145632-2509b142fb2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20171028101351-661970f62f58/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
@@ -174,3 +452,4 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
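
The go.sum churn above comes from the gofsutil, goiscsi, gounity and gocsi bumps in go.mod pulling in new transitive modules. If go.mod is edited further, the checksum file can be regenerated and cross-checked with the standard toolchain; a minimal sketch, assuming a module-aware Go toolchain:

    # Re-resolve dependencies and prune stale entries from go.sum,
    # then verify the cached modules against the recorded hashes.
    go mod tidy
    go mod verify
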
diff --git a/helm/README.md b/helm/README.md
deleted file mode 100644
index 6c04cd58..00000000
--- a/helm/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Dell EMC Unity Helm Chart for Kubernetes
-
-For detailed installation instructions, please check the doc directory
-
-The general outline is:
-
- 1. Satisfy the pre-requisites outlined in the Release and Installation Notes in the doc directory.
-
- 2. Copy the `csi-unity/values.yaml` to a file `myvalues.yaml` in this directory and fill in various installation parameters.
-
- 3. Invoke the `install.unity` shell script which deploys the helm chart in csi-unity.
\ No newline at end of file
diff --git a/helm/common.bash b/helm/common.bash
deleted file mode 100644
index 6dd28334..00000000
--- a/helm/common.bash
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-# Verify kubeadm and kubectl present
-kubectl --help >&/dev/null || {
- echo "kubectl required for installation... exiting"; exit 2
-}
-kubeadm --help >&/dev/null || {
- echo "kubeadm required for installation... exiting"; exit 2
-}
-
-waitOnRunning() {
- TARGET=$(kubectl get pods -n ${NS} | grep ${NS} | wc -l)
- RUNNING=0
- while [ $RUNNING -ne $TARGET ];
- do
- sleep 10
- TARGET=$(kubectl get pods -n ${NS} | grep ${NS} | wc -l)
- RUNNING=$(kubectl get pods -n ${NS} | grep "Running" | wc -l)
- date
- echo running $RUNNING / $TARGET
- kubectl get pods -n ${NS}
- done
-}
-
-# Get the kubernetes major and minor version numbers.
-kMajorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Major:"//' -e 's/",.*//')
-kMinorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Minor:"//' -e 's/",.*//')
-
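
The removed helper derived kMajorVersion/kMinorVersion by sed-scraping the text output of kubectl version. If the same values are needed by hand, parsing the JSON form is less brittle; a sketch, assuming jq is available (the gsub strips the "+" suffix some distributions append to the minor):

    kMajor=$(kubectl version -o json | jq -r '.serverVersion.major')
    kMinor=$(kubectl version -o json | jq -r '.serverVersion.minor | gsub("[^0-9]"; "")')
    echo "server is v${kMajor}.${kMinor}"
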
diff --git a/helm/csi-unity/Chart.yaml b/helm/csi-unity/Chart.yaml
index 2b01ab18..c8bb0ca6 100644
--- a/helm/csi-unity/Chart.yaml
+++ b/helm/csi-unity/Chart.yaml
@@ -1,6 +1,6 @@
name: csi-unity
-version: 1.2.1
-appVersion: 1.2.1
+version: 1.3.0
+appVersion: 1.3.0
description: |
Unity CSI (Container Storage Interface) driver Kubernetes
integration. This chart includes everything required to provision via CSI as
diff --git a/helm/csi-unity/driver-image.yaml b/helm/csi-unity/driver-image.yaml
new file mode 100644
index 00000000..f53057b5
--- /dev/null
+++ b/helm/csi-unity/driver-image.yaml
@@ -0,0 +1,4 @@
+# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED.
+images:
+ # "images.driver" defines the container images used for the driver container.
+ driver: dellemc/csi-unity:v1.3.0.000R
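
Keeping the driver image in its own values file lets it be layered with the per-Kubernetes-version files and user overrides at render time. One way to check what would actually be pulled, assuming Helm 3 syntax and the myvalues.yaml override file the chart already expects (run from the helm/ directory):

    # Render the chart locally and list the images the manifests resolve to.
    helm template unity ./csi-unity -n unity \
        -f csi-unity/driver-image.yaml \
        -f csi-unity/k8s-1.18-values.yaml \
        -f myvalues.yaml | grep 'image:'
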
diff --git a/helm/csi-unity/k8s-1.16-values.yaml b/helm/csi-unity/k8s-1.16-values.yaml
index 56123dc7..ef9d66bd 100644
--- a/helm/csi-unity/k8s-1.16-values.yaml
+++ b/helm/csi-unity/k8s-1.16-values.yaml
@@ -8,12 +8,15 @@ images:
# "images.provisioner" defines the container images used for the csi provisioner
# container.
- provisioner: quay.io/k8scsi/csi-provisioner:v1.4.0
+ provisioner: quay.io/k8scsi/csi-provisioner:v1.4.0
# "images.snapshotter" defines the container image used for the csi snapshotter
snapshotter: quay.io/k8scsi/csi-snapshotter:v1.2.2
# "images.registrar" defines the container images used for the csi registrar
# container.
- registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+ registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+ # "images.resizer" defines the container images used for the csi resizer
+ # container.
+ resizer: quay.io/k8scsi/csi-resizer:v0.5.0
diff --git a/helm/csi-unity/k8s-1.14-values.yaml b/helm/csi-unity/k8s-1.17-values.yaml
similarity index 62%
rename from helm/csi-unity/k8s-1.14-values.yaml
rename to helm/csi-unity/k8s-1.17-values.yaml
index b267b3f2..7a95aa72 100644
--- a/helm/csi-unity/k8s-1.14-values.yaml
+++ b/helm/csi-unity/k8s-1.17-values.yaml
@@ -1,20 +1,22 @@
# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED.
-kubeversion: "v1.14"
+kubeversion: "v1.17"
images:
# "images.attacher" defines the container images used for the csi attacher
# container.
- attacher: quay.io/k8scsi/csi-attacher:v1.2.1
+ attacher: quay.io/k8scsi/csi-attacher:v2.2.0
# "images.provisioner" defines the container images used for the csi provisioner
# container.
- #provisioner: quay.io/k8scsi/csi-provisioner:v0.4.2 #for CSI 0.3.0 only (obsolete)
- provisioner: quay.io/k8scsi/csi-provisioner:v1.2.1
+ provisioner: quay.io/k8scsi/csi-provisioner:v1.5.0
# "images.snapshotter" defines the container image used for the csi snapshotter
- snapshotter: quay.io/k8scsi/csi-snapshotter:v1.2.2
+ snapshotter: quay.io/k8scsi/csi-snapshotter:v2.1.1
# "images.registrar" defines the container images used for the csi registrar
# container.
registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+ # "images.resizer" defines the container images used for the csi resizer
+ # container.
+ resizer: quay.io/k8scsi/csi-resizer:v0.5.0
diff --git a/helm/csi-unity/k8s-1.18-values.yaml b/helm/csi-unity/k8s-1.18-values.yaml
new file mode 100644
index 00000000..0bb23433
--- /dev/null
+++ b/helm/csi-unity/k8s-1.18-values.yaml
@@ -0,0 +1,22 @@
+# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED.
+kubeversion: "v1.18"
+
+images:
+ # "images.attacher" defines the container images used for the csi attacher
+ # container.
+ attacher: quay.io/k8scsi/csi-attacher:v2.2.0
+
+ # "images.provisioner" defines the container images used for the csi provisioner
+ # container.
+ provisioner: quay.io/k8scsi/csi-provisioner:v1.6.0
+
+ # "images.snapshotter" defines the container image used for the csi snapshotter
+ snapshotter: quay.io/k8scsi/csi-snapshotter:v2.1.1
+
+ # "images.registrar" defines the container images used for the csi registrar
+ # container.
+ registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+
+ # "images.resizer" defines the container images used for the csi resizer
+ # container.
+ resizer: quay.io/k8scsi/csi-resizer:v0.5.0
diff --git a/helm/csi-unity/k8s-1.19-values.yaml b/helm/csi-unity/k8s-1.19-values.yaml
new file mode 100644
index 00000000..83281e87
--- /dev/null
+++ b/helm/csi-unity/k8s-1.19-values.yaml
@@ -0,0 +1,22 @@
+# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED.
+kubeversion: "v1.19"
+
+images:
+ # "images.attacher" defines the container images used for the csi attacher
+ # container.
+ attacher: quay.io/k8scsi/csi-attacher:v2.2.0
+
+ # "images.provisioner" defines the container images used for the csi provisioner
+ # container.
+ provisioner: quay.io/k8scsi/csi-provisioner:v1.6.0
+
+ # "images.snapshotter" defines the container image used for the csi snapshotter
+ snapshotter: quay.io/k8scsi/csi-snapshotter:v2.1.1
+
+ # "images.registrar" defines the container images used for the csi registrar
+ # container.
+ registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+
+ # "images.resizer" defines the container images used for the csi resizer
+ # container.
+ resizer: quay.io/k8scsi/csi-resizer:v0.5.0
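
The 1.16 through 1.19 files above differ only in sidecar image tags, so the right file can be derived from the running control plane using the chart's k8s-<major>.<minor>-values.yaml naming scheme; a sketch, reusing the jq parsing shown earlier:

    kMinor=$(kubectl version -o json | jq -r '.serverVersion.minor | gsub("[^0-9]"; "")')
    valuesFile="csi-unity/k8s-1.${kMinor}-values.yaml"
    # Fail early on a minor this chart does not ship values for.
    [ -f "$valuesFile" ] || { echo "unsupported Kubernetes version 1.${kMinor}" >&2; exit 1; }
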
diff --git a/helm/csi-unity/templates/controller.yaml b/helm/csi-unity/templates/controller.yaml
index 5226fe34..32a1e074 100644
--- a/helm/csi-unity/templates/controller.yaml
+++ b/helm/csi-unity/templates/controller.yaml
@@ -49,6 +49,13 @@ rules:
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents/status"]
+ verbs: ["update"]
+  # the rules below are required by the resizer sidecar
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims/status"]
+ verbs: ["update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -124,6 +131,17 @@ spec:
volumeMounts:
- name: socket-dir
mountPath: /var/run/csi
+ - name: resizer
+ image: {{ required "Must provide the CSI resizer container image." .Values.images.resizer }}
+ args:
+ - "--csi-address=$(ADDRESS)"
+ - "--v=5"
+ env:
+ - name: ADDRESS
+ value: /var/run/csi/csi.sock
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /var/run/csi
- name: driver
image: {{ required "Must provide the Unity driver container image." .Values.images.driver }}
args:
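
After an upgrade, the controller pod should list the new resizer alongside the attacher, provisioner, snapshotter and driver containers; a quick check that avoids assuming any pod labels:

    # Print every pod in the namespace with its container names;
    # the controller pod should now include "resizer".
    kubectl get pods -n unity -o jsonpath=\
    '{range .items[*]}{.metadata.name}{": "}{.spec.containers[*].name}{"\n"}{end}'
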
diff --git a/helm/csi-unity/templates/node.yaml b/helm/csi-unity/templates/node.yaml
index 6a12255c..945ff746 100644
--- a/helm/csi-unity/templates/node.yaml
+++ b/helm/csi-unity/templates/node.yaml
@@ -74,7 +74,8 @@ spec:
- name: driver-path
mountPath: /var/lib/kubelet/plugins/unity.emc.dell.com
- name: volumedevices-path
- mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
+ mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi/pv
+ mountPropagation: "Bidirectional"
- name: pods-path
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
@@ -117,7 +118,7 @@ spec:
type: DirectoryOrCreate
- name: volumedevices-path
hostPath:
- path: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
+ path: /var/lib/kubelet/plugins/kubernetes.io/csi/pv
type: DirectoryOrCreate
- name: pods-path
hostPath:
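
The node plugin now stages volumes under the kubelet's csi/pv directory with bidirectional propagation, so mounts made inside the container reach the host. To confirm the rendered daemonset picked this up (the container name "driver" is assumed to match the controller template; adjust if the node template names it differently):

    kubectl get daemonset unity-node -n unity -o jsonpath=\
    '{.spec.template.spec.containers[?(@.name=="driver")].volumeMounts}'
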
diff --git a/helm/csi-unity/templates/storageclass.yaml b/helm/csi-unity/templates/storageclass.yaml
index 8b460a47..f3b8f849 100644
--- a/helm/csi-unity/templates/storageclass.yaml
+++ b/helm/csi-unity/templates/storageclass.yaml
@@ -14,6 +14,7 @@ metadata:
{{- end }}
provisioner: csi-unity.dellemc.com
reclaimPolicy: {{ $v.storageClass.reclaimPolicy | default $global.Values.storageClass.reclaimPolicy | quote }}
+allowVolumeExpansion: true
parameters:
protocol: FC
arrayId: {{ required "Must provide a arrayId" $v.name | quote }}
@@ -36,6 +37,7 @@ metadata:
{{- end }}
provisioner: csi-unity.dellemc.com
reclaimPolicy: {{ $v.storageClass.reclaimPolicy | default $global.Values.storageClass.reclaimPolicy | quote }}
+allowVolumeExpansion: true
parameters:
protocol: iSCSI
arrayId: {{ required "Must provide a arrayId" $v.name | quote }}
@@ -59,6 +61,7 @@ metadata:
{{- end }}
provisioner: csi-unity.dellemc.com
reclaimPolicy: {{ $v.storageClass.reclaimPolicy | default $global.Values.storageClass.reclaimPolicy | quote }}
+allowVolumeExpansion: true
parameters:
protocol: NFS
arrayId: {{ required "Must provide a arrayId" $v.name | quote }}
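
With allowVolumeExpansion enabled on the FC, iSCSI and NFS classes, resizing becomes a plain PVC edit handled by the new resizer sidecar. An illustrative request, where the PVC and namespace names are placeholders:

    # Grow the claim; the resizer observes the change and calls the driver.
    kubectl patch pvc my-unity-pvc -n my-namespace --type merge \
        -p '{"spec":{"resources":{"requests":{"storage":"10Gi"}}}}'
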
diff --git a/helm/csi-unity/templates/volumesnapshotclass.yaml b/helm/csi-unity/templates/volumesnapshotclass.yaml
index 84a98fc2..22348e07 100644
--- a/helm/csi-unity/templates/volumesnapshotclass.yaml
+++ b/helm/csi-unity/templates/volumesnapshotclass.yaml
@@ -1,6 +1,10 @@
{{ $global := . }}
{{- range $i, $v := .Values.storageArrayList }}
+{{- if eq $global.Values.kubeversion "v1.16" }}
apiVersion: snapshot.storage.k8s.io/v1alpha1
+{{- else }}
+apiVersion: snapshot.storage.k8s.io/v1beta1
+{{- end }}
kind: VolumeSnapshotClass
metadata:
{{- if $v.isDefaultArray }}
@@ -10,7 +14,12 @@ metadata:
{{ else }}
name: {{ "unity" }}-{{ $v.name | lower }}-snapclass
{{- end }}
+{{- if eq $global.Values.kubeversion "v1.16" }}
snapshotter: csi-unity.dellemc.com
+{{- else }}
+driver: csi-unity.dellemc.com
+deletionPolicy: Delete
+{{- end }}
parameters:
retentionDuration: {{ $v.snapshotClass.retentionDuration | default "" | quote }}
---
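
Because the snapshot class now selects its API group version from kubeversion, it is worth confirming which version the cluster actually serves before choosing a values file (this assumes the external snapshot CRDs are installed):

    kubectl api-versions | grep snapshot.storage.k8s.io
    # expect snapshot.storage.k8s.io/v1alpha1 on 1.16,
    # snapshot.storage.k8s.io/v1beta1 on 1.17 and later
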
diff --git a/helm/csi-unity/values.yaml b/helm/csi-unity/values.yaml
index 265f20f7..d43f66c3 100644
--- a/helm/csi-unity/values.yaml
+++ b/helm/csi-unity/values.yaml
@@ -1,4 +1,5 @@
# Represents number of certificate secrets, which user is going to create for ssl authentication. (unity-cert-0..unity-cert-n)
+# Minimum value should be 1
certSecretCount: 1
# Time interval to add node info to array. Default 15 minutes. Minimum value should be 1.
@@ -70,7 +71,4 @@ storageClassProtocols:
- protocol: "NFS"
- protocol: "iSCSI"
-# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED.
-images:
- # "images.driver" defines the container images used for the driver container.
- driver: dellemc/csi-unity:v1.2.1.000R
\ No newline at end of file
+
diff --git a/helm/install.unity b/helm/install.unity
deleted file mode 100755
index 3ef84ae1..00000000
--- a/helm/install.unity
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-NS="unity"
-
-# Use relative path of script invocation
-cd "`dirname \"$0\"`"
-
-source ./common.bash
-
-# Only k8s 1.14 and k8s 1.16 are supported
-if ! [ ${kMajorVersion} -eq 1 -a \( ${kMinorVersion} -eq 14 -o ${kMinorVersion} -eq 16 \) ]
-then
- echo "Only Kubernetes 1.14 and 1.16 versions are supported"
- exit 0
-fi
-
-# Verify the kubernetes installation has the feature gates needed.
-sh ./verify.kubernetes
-rc=$?
-if [ $rc -ne 0 ]
-then
- echo "*******************************************************************************"
- echo "Warning: Kubernetes --feature-gates not correctly configured... it may not work"
- echo "*******************************************************************************"
- sleep 5
-fi
-
-# Check if unity-certs-0 exists. If not, then create it
-kubectl get secrets -n "$NS" | grep unity-certs-0 --quiet
-if [ $? -ne 0 ];
- then echo "*** Couldn't find unity-certs. Creating an empty secret. Delete it and re-create it if you want to validate unisphere certificates ***"
- kubectl create -f ./emptysecret.yaml
-fi
-
-kubectl get customresourcedefinitions | grep snapshot --quiet
-if [ $? -ne 0 ]
-then
- echo "installing snapshot CRD"; kubectl create -f snapshot-crd.yaml
-fi
-
-helm version | grep "v3." --quiet
-if [ $? -eq 0 ]
-then
- echo "Installing using helm version 3"
- helm install unity --values myvalues.yaml --values csi-unity/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml -n "$NS" ./csi-unity
-else
- echo "Installing using helm version 2"
- helm install --values myvalues.yaml --values csi-unity/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml --name unity --namespace unity ./csi-unity
-fi
-
-waitOnRunning
-
-echo "CSIDrivers:"
-kubectl get csidrivers
-echo "CSINodes:"
-kubectl get csinode
-echo "StorageClasses:"
-kubectl get storageclass
-
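
With the Helm 2 path and this wrapper script gone, its remaining pre-flight steps reduce to a few manual commands. A sketch under the file names as they stand after this change (emptysecret.yaml is assumed to still ship alongside the chart, and the v1alpha1 CRDs are only needed on 1.16 clusters):

    # Empty cert secret so the driver starts without Unisphere certificate validation.
    kubectl get secret unity-certs-0 -n unity >/dev/null 2>&1 \
        || kubectl create -f emptysecret.yaml
    # Snapshot CRDs, v1alpha1 flavor, for Kubernetes 1.16 only.
    kubectl get crd volumesnapshotclasses.snapshot.storage.k8s.io >/dev/null 2>&1 \
        || kubectl create -f snapshot-crd-v1alpha1.yaml
    # Helm 3 install; the "unity" namespace must already exist.
    helm install unity ./csi-unity -n unity \
        -f myvalues.yaml -f csi-unity/k8s-1.16-values.yaml
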
diff --git a/helm/snapshot-crd.yaml b/helm/snapshot-crd-v1alpha1.yaml
similarity index 100%
rename from helm/snapshot-crd.yaml
rename to helm/snapshot-crd-v1alpha1.yaml
diff --git a/helm/uninstall.unity b/helm/uninstall.unity
deleted file mode 100755
index d308eb63..00000000
--- a/helm/uninstall.unity
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-lifecyclePresent=$(kubectl get daemonset unity-node -n unity -o yaml | grep -c lifecycle)
-if [ $lifecyclePresent -gt 0 ]; then
- echo "Removing lifecycle hooks from daemonset"
- kubectl patch daemonset unity-node -n unity --type json -p='[{"op": "replace", "path": "/spec/updateStrategy", "type":"OnDelete"}]'
- kubectl patch daemonset unity-node -n unity --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/lifecycle"}]'
- kubectl patch daemonset unity-node -n unity --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/1/lifecycle"}]'
- sleep 5
- kubectl delete pod -l app=unity-node -n unity --force --grace-period=0
-fi
-
-helm version | grep "v3." --quiet
-if [ $? -eq 0 ]
-then
- echo "Uninstalling using helm version 3"
- helm delete -n unity unity
-else
- echo "Uninstalling using helm version 2"
- helm delete --purge unity
-fi
-
-sleep 10
-kubectl get pods -n unity
diff --git a/helm/upgrade.unity b/helm/upgrade.unity
deleted file mode 100755
index bde12dc2..00000000
--- a/helm/upgrade.unity
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-NS="unity"
-
-# Use relative path of script invocation
-cd "`dirname \"$0\"`"
-
-source ./common.bash
-# Verify the kubernetes installation has the feature gates needed.
-sh ./verify.kubernetes
-
-# Check if unity-certs-0 exists. If not, then create it
-kubectl get secrets -n "$NS" | grep unity-certs-0 --quiet
-if [ $? -ne 0 ];
- then echo "*** Couldn't find unity-certs. Creating an empty secret. Delete it and re-create it if you want to validate unisphere certificates ***"
- kubectl create -f ./emptysecret.yaml
-fi
-
-kubectl get customresourcedefinitions | grep snapshot --quiet
-if [ $? -ne 0 ]
-then
- echo "installing snapshot CRD"; kubectl create -f snapshot-crd.yaml
-fi
-
-lifecyclePresent=$(kubectl get daemonset unity-node -n unity -o yaml | grep -c lifecycle)
-echo "lifecycle present :${lifecyclePresent}"
-if [ $lifecyclePresent -gt 0 ]; then
- echo "Removing lifecycle hooks from daemonset"
- kubectl patch daemonset unity-node -n unity --type json -p='[{"op": "replace", "path": "/spec/updateStrategy", "type":"OnDelete"}]'
- kubectl patch daemonset unity-node -n unity --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/lifecycle"}]'
- kubectl patch daemonset unity-node -n unity --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/1/lifecycle"}]'
- sleep 5
- kubectl delete pod -l app=unity-node -n unity --force --grace-period=0
- waitOnRunning
-fi
-
-helm version | grep "v3." --quiet
-if [ $? -eq 0 ]
-then
- echo "Upgrading using helm version 3"
- helm upgrade --values myvalues.yaml --values csi-unity/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml --namespace "$NS" unity ./csi-unity
-else
- echo "Upgrading using helm version 2"
- kubectl delete csidriver unity
- helm upgrade --values myvalues.yaml --values csi-unity/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml unity --namespace unity ./csi-unity
-fi
-
-waitOnRunning
-
-echo "CSIDrivers:"
-kubectl get csidrivers
-echo "CSINodes:"
-kubectl get csinode
-echo "StorageClasses:"
-kubectl get storageclass
diff --git a/helm/verify.kubernetes b/helm/verify.kubernetes
deleted file mode 100755
index 3ff3756d..00000000
--- a/helm/verify.kubernetes
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-# Determine the kubernetes version
-kubeversion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*GitVersion:"//' -e 's/",.*//')
-echo Kubernetes version $kubeversion
-
-# Determine the nodes
-MINION_NODES=$(kubectl get nodes -o wide | grep -v -e master -e INTERNAL | awk ' { print $6; }')
-MASTER_NODES=$(kubectl get nodes -o wide | awk ' /master/{ print $6; }')
-echo Kubernetes master nodes: $MASTER_NODES
-echo Kubernetes minion nodes: $MINION_NODES
-
-# Variables used for verification
-FEATURE_GATES="VolumeSnapshotDataSource=true"
-MASTER_PROCS="/kubelet kube-apiserver kube-scheduler kube-controller-manager"
-MINION_PROCS="/kubelet"
-fail=0
-
-echo Verifying the feature gates.
-for node in $MASTER_NODES
-do
- echo ssh $node ps -ef >.ps.out
- ssh $node ps -ef >.ps.out
- for gate in $FEATURE_GATES
- do
- #echo checking $node for $gate ...
- for proc in $MASTER_PROCS
- do
- #echo proc $proc
- count=$(grep $proc .ps.out | grep -c $gate)
- #echo $node $gate $proc $count
- [ $count -ne "1" ] && { echo "node $node proc $proc gate $gate failed"; fail=1; }
- done
- done
-done
-
-for node in $MINION_NODES
-do
- echo ssh $node ps -ef >.ps.out
- ssh $node ps -ef >.ps.out
- for gate in $FEATURE_GATES
- do
- #echo checking $node for $gate ...
- for proc in $MINION_PROCS
- do
- #echo proc $proc
- count=$(grep $proc .ps.out | grep -c $gate)
- #echo $node $gate $proc $count
- [ $count -ne "1" ] && { echo "node $node proc $proc gate $gate failed"; fail=1; }
- done
- done
-done
-
-exit $fail
diff --git a/licenses/LICENSE b/licenses/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/licenses/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/service/controller.go b/service/controller.go
index 536cc680..a75e1115 100644
--- a/service/controller.go
+++ b/service/controller.go
@@ -6,12 +6,13 @@ package service
import (
"fmt"
+ "strconv"
+ "strings"
+
"github.com/dell/gounity/api"
"github.com/dell/gounity/util"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
- "strconv"
- "strings"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/dell/csi-unity/service/utils"
@@ -132,23 +133,28 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest
thin, err := strconv.ParseBool(params[keyThinProvisioned])
if err != nil {
thin = true
- log.Debugf("Parameter %s is set to [%s]", keyThinProvisioned, thin)
+ log.Debugf("Parameter %s is set to [%t]", keyThinProvisioned, thin)
}
dataReduction, err := strconv.ParseBool(params[keyDataReductionEnabled])
if err != nil {
- log.Debugf("Parameter %s is set to [%s]", keyDataReductionEnabled, dataReduction)
+ log.Debugf("Parameter %s is set to [%t]", keyDataReductionEnabled, dataReduction)
}
- tieringPolicy, err := strconv.ParseInt(params[keyTieringPolicy], 0, 32)
+ tieringPolicy, err := strconv.ParseInt(params[keyTieringPolicy], 0, 64)
if err != nil {
tieringPolicy = 0
- log.Debugf("Parameter %s is set to [%s]", keyTieringPolicy, tieringPolicy)
+ log.Debugf("Parameter %s is set to [%d]", keyTieringPolicy, tieringPolicy)
}
hostIOLimitName := strings.TrimSpace(params[keyHostIOLimitName])
- unity, err := s.getUnityClient(arrayId)
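+ //hostIoSize applies to NFS filesystems; default to 8192 when the parameter is absent or cannot be parsed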
+ hostIoSize, err := strconv.ParseInt(params[keyHostIoSize], 0, 64)
+ if err != nil {
+ hostIoSize = 8192
+ }
+
+ unity, err := s.getUnityClient(ctx, arrayId)
if err != nil {
return nil, err
}
@@ -157,8 +163,147 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest
contentSource := req.GetVolumeContentSource()
if contentSource != nil {
volumeSource := contentSource.GetVolume()
+
if volumeSource != nil {
- return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Volume clone is not supported"))
+ sourceVolId := volumeSource.VolumeId
+ sourceVolId, _, sourceArrayId, _, err := s.validateAndGetResourceDetails(ctx, volumeSource.VolumeId, volumeType)
+ if err != nil {
+ return nil, err
+ }
+ if sourceVolId == "" {
+ return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume ID cannot be empty"))
+ }
+
+ log.Debugf("Cloning Volume: %s", sourceVolId)
+ if arrayId != sourceArrayId {
+ return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume array id: %s is different than the requested volume array id: %s", sourceArrayId, arrayId))
+ }
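+ //NFS volumes are cloned through array snapshots; block volumes are cloned as thin clones of the source LUN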
+ if protocol == NFS {
+
+ snapApi := gounity.NewSnapshot(unity)
+ fileAPI := gounity.NewFilesystem(unity)
+ filesystem, err := fileAPI.FindFilesystemById(ctx, sourceVolId)
+ isSnapshot := false
+ var snapResp *types.Snapshot
+ var snapErr error
+ if err != nil {
+ //Filesystem not found - check whether the PVC exists as a snapshot (cloned volume in the NFS case)
+ snapResp, snapErr = snapApi.FindSnapshotById(ctx, sourceVolId)
+ if snapErr != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find source filesystem: %s failed. Error: %v", sourceVolId, err))
+ }
+ isSnapshot = true
+ filesystem, err = s.getFilesystemByResourceID(ctx, snapResp.SnapshotContent.StorageResource.Id, arrayId)
+ if err != nil {
+ return nil, err
+ }
+ }
+
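+ //The source filesystem must match the requested pool, tiering policy, host IO size, thin and data reduction settings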
+ err = validateCreateFsFromSnapshot(ctx, filesystem, storagePool, tieringPolicy, hostIoSize, thin, dataReduction)
+ if err != nil {
+ return nil, err
+ }
+
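+ //A snapshot source means the source PVC is itself a cloned volume backed by a snapshot on the array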
+ if isSnapshot {
+ // Validate the size parameter
+ snapSize := int64(snapResp.SnapshotContent.Size - AdditionalFilesystemSize)
+ if snapSize != size {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Requested size %d should be the same as the source filesystem size %d", size, snapSize))
+ }
+ //Idempotency check
+ snapResp, err := snapApi.FindSnapshotByName(ctx, volName)
+ if snapResp == nil {
+ //Create Volume from Snapshot (copy snapshot on array)
+ snapResp, err = s.createFilesystemFromSnapshot(ctx, sourceVolId, volName, arrayId)
+ if err != nil {
+ return nil, err
+ }
+ } else if snapResp.SnapshotContent.Size != int64(size+AdditionalFilesystemSize) {
+ return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Snapshot with the same name %s already exists with a different size.", volName))
+ }
+ snapResp.SnapshotContent.Size -= AdditionalFilesystemSize
+ csiVolResp := utils.GetVolumeResponseFromSnapshot(snapResp, arrayId, protocol)
+ csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
+ return csiVolResp, nil
+ } else {
+ fsSize := int64(filesystem.FileContent.SizeTotal - AdditionalFilesystemSize)
+ if size != fsSize {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Requested size %d should be the same as the source volume size %d",
+ size, fsSize))
+ }
+
+ snap, err := s.createIdempotentSnapshot(ctx, req.Name, sourceVolId, desc, "", protocol, arrayId, true)
+ if err != nil {
+ return nil, err
+ }
+ csiVolResp := utils.GetVolumeResponseFromSnapshot(snap, arrayId, protocol)
+ csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
+ return csiVolResp, nil
+ }
+ } else {
+
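+ //Block volume clone: validate that the source volume matches all requested parameters before creating the thin clone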
+ volumeApi := gounity.NewVolume(unity)
+ sourceVolResp, err := volumeApi.FindVolumeById(ctx, sourceVolId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Source volume not found: %s. Error: %v", sourceVolId, err))
+ }
+ // Validate the size parameter
+ if int64(sourceVolResp.VolumeContent.SizeTotal) != size {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Requested size %d should be the same as the source volume size %d",
+ size, int64(sourceVolResp.VolumeContent.SizeTotal)))
+ }
+ // Validate the storagePool parameter
+ if sourceVolResp.VolumeContent.Pool.Id != storagePool {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume storage pool %s is different than the requested storage pool %s",
+ sourceVolResp.VolumeContent.Pool.Id, storagePool))
+ }
+ //Validate the thinProvisioned parameter
+ if sourceVolResp.VolumeContent.IsThinEnabled != thin {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume thin provision %v is different than the requested thin provision %v",
+ sourceVolResp.VolumeContent.IsThinEnabled, thin))
+ }
+ //Validate the dataReduction parameter
+ if sourceVolResp.VolumeContent.IsDataReductionEnabled != dataReduction {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume data reduction %v is different than the requested data reduction %v",
+ sourceVolResp.VolumeContent.IsDataReductionEnabled, dataReduction))
+ }
+ //Validate the tieringPolicy parameter
+ if int64(sourceVolResp.VolumeContent.TieringPolicy) != tieringPolicy {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume tiering policy %v is different than the requested tiering policy %v",
+ sourceVolResp.VolumeContent.TieringPolicy, tieringPolicy))
+ }
+
+ volResp, _ := volumeApi.FindVolumeByName(ctx, volName)
+ if volResp != nil {
+ //Idempotency Check
+ if volResp.VolumeContent.IsThinClone && len(volResp.VolumeContent.ParentVolume.Id) > 0 && volResp.VolumeContent.ParentVolume.Id == sourceVolId &&
+ volResp.VolumeContent.SizeTotal == sourceVolResp.VolumeContent.SizeTotal {
+ log.Infof("Volume %s exists in the requested state as a clone of volume %s", volName, sourceVolResp.VolumeContent.Name)
+ csiVolResp := utils.GetVolumeResponseFromVolume(volResp, arrayId, protocol)
+ csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
+ return csiVolResp, nil
+ }
+ return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Volume with the same name %s already exists", volName))
+ }
+
+ //Perform volume cloning
+ volResp, err = volumeApi.CreateCloneFromVolume(ctx, volName, sourceVolId)
+ if err != nil {
+ if err == gounity.CreateSnapshotFailedError {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Unable to create snapshot for volume cloning for source volume: %s", sourceVolId))
+ } else if err == gounity.CloningFailedError {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Volume cloning for source volume: %s failed.", sourceVolId))
+ }
+ }
+
+ volResp, err = volumeApi.FindVolumeByName(ctx, volName)
+ if volResp != nil {
+ csiVolResp := utils.GetVolumeResponseFromVolume(volResp, arrayId, protocol)
+ csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
+ return csiVolResp, nil
+ }
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Volume not found after create. %v", err))
+ }
}
snapshotSource := contentSource.GetSnapshot()
if snapshotSource != nil {
@@ -166,106 +311,147 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest
if snapId == "" {
return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Source snapshot ID cannot be empty"))
}
- snapId, _, _, _, err := s.validateAndGetResourceDetails(ctx, snapId, snapshotType)
+ snapId, _, sourceArrayId, _, err := s.validateAndGetResourceDetails(ctx, snapId, snapshotType)
if err != nil {
return nil, err
}
- if protocol == NFS {
- return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Create Volume from snapshot not supported for NFS protocol"))
- }
-
log.Debugf("Creating the volume from snapshot: %s", snapId)
+ if arrayId != sourceArrayId {
+ return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source snapshot array id: %s is different than the requested volume array id: %s", sourceArrayId, arrayId))
+ }
snapApi := gounity.NewSnapshot(unity)
snapResp, err := snapApi.FindSnapshotById(ctx, snapId)
if err != nil {
return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Source snapshot not found: %s", snapId))
}
- volumeApi := gounity.NewVolume(unity)
- volId := snapResp.SnapshotContent.StorageResource.Id
- volId, _, _, _, err = s.validateAndGetResourceDetails(ctx, volId, volumeType)
- if err != nil {
- return nil, err
- }
+ if protocol == NFS {
- sourceVolResp, err := volumeApi.FindVolumeById(ctx, volId)
- if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Source volume not found: %s", volId))
- }
- // Validate the size is the same.
- if int64(sourceVolResp.VolumeContent.SizeTotal) != size {
- return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Requested size %d is incompatible with source volume size %d",
- size, int64(sourceVolResp.VolumeContent.SizeTotal)))
- }
+ sourceFilesystemResp, err := s.getFilesystemByResourceID(ctx, snapResp.SnapshotContent.StorageResource.Id, arrayId)
+ if err != nil {
+ return nil, err
+ }
- // Validate the storagePool is the same.
- if sourceVolResp.VolumeContent.Pool.Id != storagePool {
- return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume storage pool %s is different than the requested storage pool %s",
- sourceVolResp.VolumeContent.Pool.Id, storagePool))
- }
+ err = validateCreateFsFromSnapshot(ctx, sourceFilesystemResp, storagePool, tieringPolicy, hostIoSize, thin, dataReduction)
+ if err != nil {
+ return nil, err
+ }
+ // Validate the size parameter
+ snapSize := int64(snapResp.SnapshotContent.Size - AdditionalFilesystemSize)
+ if snapSize != size {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Requested size %d should be the same as the source snapshot size %d", size, snapSize))
+ }
- //Validate the thinProvisioned parameter
- if sourceVolResp.VolumeContent.IsThinEnabled != thin {
- return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume thin provision %v is different than the requested thin provision %v",
- sourceVolResp.VolumeContent.IsThinEnabled, thin))
- }
+ snapshotApi := gounity.NewSnapshot(unity)
+ snapResp, err := snapshotApi.FindSnapshotByName(ctx, volName)
+ if snapResp != nil {
+ //Idempotency check
+ if snapResp.SnapshotContent.ParentSnap.Id == snapId && snapResp.SnapshotContent.AccessType == int(gounity.ProtocolAccessType) {
+ log.Infof("Filesystem %s exists in the requested state as a volume from snapshot(snapshot on array) %s", volName, snapId)
+ snapResp.SnapshotContent.Size -= AdditionalFilesystemSize
+ csiVolResp := utils.GetVolumeResponseFromSnapshot(snapResp, arrayId, protocol)
+ csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
+ return csiVolResp, nil
+ }
+ return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Filesystem with the same name %s already exists", volName))
+ }
- //Validate the dataReduction parameter
- if sourceVolResp.VolumeContent.IsDataReductionEnabled != dataReduction {
- return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume data reduction %v is different than the requested data reduction %v",
- sourceVolResp.VolumeContent.IsDataReductionEnabled, dataReduction))
- }
+ //Create Volume from Snapshot (copy snapshot on array)
+ snapResp, err = s.createFilesystemFromSnapshot(ctx, snapId, volName, arrayId)
+ if err != nil {
+ return nil, err
+ }
- volResp, _ := volumeApi.FindVolumeByName(ctx, volName)
- if volResp != nil {
- //Idempotency Check
- if volResp.VolumeContent.IsThinClone == true && len(volResp.VolumeContent.ParentSnap.Id) > 0 && volResp.VolumeContent.ParentSnap.Id == snapId {
- log.Info("Volume exists in the requested state")
- csiVolResp := utils.GetVolumeResponseFromVolume(volResp, arrayId, protocol)
+ if snapResp != nil {
+ snapResp.SnapshotContent.Size -= AdditionalFilesystemSize
+ csiVolResp := utils.GetVolumeResponseFromSnapshot(snapResp, arrayId, protocol)
csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
return csiVolResp, nil
}
- return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Volume with same name %s already exists", volName))
- }
-
- if snapResp.SnapshotContent.IsAutoDelete == true {
- err = snapApi.ModifySnapshotAutoDeleteParameter(ctx, snapId)
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Filesystem: %s not found after create. Error: %v", volName, err))
+ } else {
+ volId := snapResp.SnapshotContent.StorageResource.Id
+ volumeApi := gounity.NewVolume(unity)
+ sourceVolResp, err := volumeApi.FindVolumeById(ctx, volId)
if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Unable to modify auto-delete parameter for snapshot %s", snapId))
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Source volume not found: %s", volId))
}
- }
- volResp, err = volumeApi.CreteLunThinClone(ctx, volName, snapId, volId)
- if err != nil {
- return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Create volume from snapshot failed with error %v", err))
- }
- volResp, err = volumeApi.FindVolumeByName(ctx, volName)
- if err != nil {
- log.Debugf("Find Volume response: %v Error: %v", volResp, err)
- }
+ // Validate the size parameter
+ if snapResp.SnapshotContent.Size != size {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Requested size %d should be the same as the source snapshot size %d", size, snapResp.SnapshotContent.Size))
+ }
+
+ // Validate the storagePool is the same.
+ if sourceVolResp.VolumeContent.Pool.Id != storagePool {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume storage pool %s is different than the requested storage pool %s",
+ sourceVolResp.VolumeContent.Pool.Id, storagePool))
+ }
+
+ //Validate the thinProvisioned parameter
+ if sourceVolResp.VolumeContent.IsThinEnabled != thin {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume thin provision %v is different than the requested thin provision %v",
+ sourceVolResp.VolumeContent.IsThinEnabled, thin))
+ }
+
+ //Validate the dataReduction parameter
+ if sourceVolResp.VolumeContent.IsDataReductionEnabled != dataReduction {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume data reduction %v is different than the requested data reduction %v",
+ sourceVolResp.VolumeContent.IsDataReductionEnabled, dataReduction))
+ }
- if volResp != nil {
- csiVolResp := utils.GetVolumeResponseFromVolume(volResp, arrayId, protocol)
- csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
- return csiVolResp, nil
+ //Validate the tieringPolicy parameter
+ if int64(sourceVolResp.VolumeContent.TieringPolicy) != tieringPolicy {
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume tiering policy %v is different than the requested tiering policy %v",
+ sourceVolResp.VolumeContent.TieringPolicy, tieringPolicy))
+ }
+
+ volResp, _ := volumeApi.FindVolumeByName(ctx, volName)
+ if volResp != nil {
+ //Idempotency Check
+ if volResp.VolumeContent.IsThinClone == true && len(volResp.VolumeContent.ParentSnap.Id) > 0 && volResp.VolumeContent.ParentSnap.Id == snapId {
+ log.Info("Volume exists in the requested state")
+ csiVolResp := utils.GetVolumeResponseFromVolume(volResp, arrayId, protocol)
+ csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
+ return csiVolResp, nil
+ }
+ return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Volume with the same name %s already exists", volName))
+ }
+
+ if snapResp.SnapshotContent.IsAutoDelete == true {
+ err = snapApi.ModifySnapshotAutoDeleteParameter(ctx, snapId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Unable to modify auto-delete parameter for snapshot %s", snapId))
+ }
+ }
+
+ volResp, err = volumeApi.CreteLunThinClone(ctx, volName, snapId, volId)
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Create volume from snapshot failed with error %v", err))
+ }
+ volResp, err = volumeApi.FindVolumeByName(ctx, volName)
+ if err != nil {
+ log.Debugf("Find Volume response: %v Error: %v", volResp, err)
+ }
+
+ if volResp != nil {
+ csiVolResp := utils.GetVolumeResponseFromVolume(volResp, arrayId, protocol)
+ csiVolResp.Volume.ContentSource = req.GetVolumeContentSource()
+ return csiVolResp, nil
+ }
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Volume not found after create. %v", err))
}
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Volume not found after create. %v", err))
}
}
if protocol == NFS {
+
nasServer, ok := params[keyNasServer]
if !ok {
return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "`%s` is a required parameter", keyNasServer))
}
- hostIoSize, err := strconv.ParseInt(params[keyHostIoSize], 0, 32)
- if err != nil {
- log.Debug("Host IO Size for NFS has not been provided and hence setting default value 8192")
- hostIoSize = 8192
- }
-
//Add AdditionalFilesystemSize in size as Unity use this much size for metadata in filesystem
size += AdditionalFilesystemSize
@@ -368,6 +554,7 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest
resp, err = volumeApi.FindVolumeByName(ctx, volName)
if resp != nil {
volumeResp := utils.GetVolumeResponseFromVolume(resp, arrayId, protocol)
+ log.Debugf("CreateVolume successful for volid: [%s]", volumeResp.Volume.VolumeId)
return volumeResp, nil
}
}
@@ -390,9 +577,32 @@ func (s *service) DeleteVolume(
return nil, err
}
deleteVolumeResp := &csi.DeleteVolumeResponse{}
-
+ var snapErr error
//Not validating protocol here to support deletion of pvcs from v1.0
if protocol != NFS {
+ //Check for stale snapshots used for volume cloning and delete them if they exist
+ snapshotApi := gounity.NewSnapshot(unity)
+ snapsResp, _, snapshotErr := snapshotApi.ListSnapshots(ctx, 0, 0, volId, "")
+ if snapshotErr != nil {
+ return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "List snapshots for volume %s failed with error: %v", volId, snapshotErr))
+ }
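+ //Snapshots created by the driver for cloning carry the SnapForClone marker in their name; any other snapshot blocks deletion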
+ totalSnaps := len(snapsResp)
+ for _, snapResp := range snapsResp {
+ snapshotName := snapResp.SnapshotContent.Name
+ if strings.Contains(snapshotName, gounity.SnapForClone) {
+ reqDeleteSnapshot := new(csi.DeleteSnapshotRequest)
+ reqDeleteSnapshot.SnapshotId = snapResp.SnapshotContent.ResourceId
+ _, snapshotErr = s.DeleteSnapshot(ctx, reqDeleteSnapshot)
+ if snapshotErr != nil {
+ return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Volume %s can not be deleted as it has associated snapshots.", volId))
+ }
+ totalSnaps -= 1
+ }
+ }
+ if totalSnaps > 0 {
+ return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Volume %s can not be deleted as it has associated snapshots.", volId))
+ }
+ //Delete the block volume
volumeAPI := gounity.NewVolume(unity)
err = volumeAPI.DeleteVolume(ctx, volId)
} else {
@@ -404,14 +614,38 @@ func (s *service) DeleteVolume(
return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Filesystem %s can not be deleted as it has associated NFS or SMB shares.", volId))
}
err = fileAPI.DeleteFilesystem(ctx, volId)
+ } else {
+ //Do not reuse err as it is used for the idempotency check
+ snapshotApi := gounity.NewSnapshot(unity)
+ snapResp, fsSnapErr := snapshotApi.FindSnapshotById(ctx, volId)
+ snapErr = fsSnapErr
+ if fsSnapErr == nil {
+ //Validate if snapshot has any nfs shares
+ sourceVolId, err := fileAPI.GetFilesystemIdFromResId(ctx, snapResp.SnapshotContent.StorageResource.Id)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Filesystem Id not found for source storage resource: %s. Error: %v", snapResp.SnapshotContent.StorageResource.Id, err))
+ }
+ filesystemResp, err = fileAPI.FindFilesystemById(ctx, sourceVolId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find source filesystem: %s failed with error: %v", sourceVolId, err))
+ }
+ for _, nfsShare := range filesystemResp.FileContent.NFSShare {
+ if nfsShare.ParentSnap.Id == volId {
+ return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Snapshot %s can not be deleted as it has associated NFS or SMB shares.", volId))
+ }
+ }
+ err = snapshotApi.DeleteSnapshot(ctx, volId)
+ }
}
}
//Idempotency check
if err == nil {
+ log.Debugf("DeleteVolume successful for volid: [%s]", req.VolumeId)
return deleteVolumeResp, nil
- } else if err == gounity.FilesystemNotFoundError || err == gounity.VolumeNotFoundError {
+ } else if err == gounity.FilesystemNotFoundError || err == gounity.VolumeNotFoundError || snapErr == gounity.SnapshotNotFoundError {
log.Debug("Volume not found on array")
+ log.Debugf("DeleteVolume successful for volid: [%s]", req.VolumeId)
return deleteVolumeResp, nil
}
return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Delete Volume %s failed with error: %v", volId, err))
@@ -510,29 +744,61 @@ func (s *service) ControllerPublishVolume(
if err != nil {
return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Export Volume Failed %v", err))
}
-
+ log.Debugf("ControllerPublishVolume successful for volid: [%s]", req.GetVolumeId())
return &csi.ControllerPublishVolumeResponse{PublishContext: pinfo}, nil
} else {
pinfo["filesystem"] = volID
fileAPI := gounity.NewFilesystem(unity)
+ isSnapshot := false
filesystemResp, err := fileAPI.FindFilesystemById(ctx, volID)
+ var snapResp *types.Snapshot
if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find Filesystem Failed with error: %v", err))
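+ //Not a filesystem Id - check whether the volume is a snapshot-backed (cloned) NFS volume and resolve its parent filesystem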
+ snapshotApi := gounity.NewSnapshot(unity)
+ snapResp, err = snapshotApi.FindSnapshotById(ctx, volID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find filesystem: %s failed with error: %v", volID, err))
+ }
+ isSnapshot = true
+
+ filesystemResp, err = s.getFilesystemByResourceID(ctx, snapResp.SnapshotContent.StorageResource.Id, arrayId)
+ if err != nil {
+ return nil, err
+ }
}
//Create NFS Share if not already present on array
nfsShareName := NFSShareNamePrefix + filesystemResp.FileContent.Name
+ if isSnapshot {
+ nfsShareName = NFSShareNamePrefix + snapResp.SnapshotContent.Name
+ }
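+ //Match shares by export path and parent snapshot so that shares created manually on the array are also detected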
nfsShareExist := false
var nfsShareID string
for _, nfsShare := range filesystemResp.FileContent.NFSShare {
- if nfsShare.Name == nfsShareName {
- nfsShareExist = true
- nfsShareID = nfsShare.Id
+ if isSnapshot {
+ if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.Id == volID {
+ nfsShareExist = true
+ nfsShareName = nfsShare.Name
+ nfsShareID = nfsShare.Id
+ }
+ } else {
+ if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.Id == "" {
+ nfsShareExist = true
+ nfsShareName = nfsShare.Name
+ nfsShareID = nfsShare.Id
+ }
}
}
if !nfsShareExist {
- filesystemResp, err = fileAPI.CreateNFSShare(ctx, nfsShareName, NFSShareLocalPath, volID, gounity.NoneDefaultAccess)
- if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Create NFS Share failed. Error: %v", err))
+ if isSnapshot {
+ nfsShareResp, err := fileAPI.CreateNFSShareFromSnapshot(ctx, nfsShareName, NFSShareLocalPath, volID, gounity.NoneDefaultAccess)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Create NFS Share failed. Error: %v", err))
+ }
+ nfsShareID = nfsShareResp.NFSShareContent.Id
+ } else {
+ filesystemResp, err = fileAPI.CreateNFSShare(ctx, nfsShareName, NFSShareLocalPath, volID, gounity.NoneDefaultAccess)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Create NFS Share failed. Error: %v", err))
+ }
}
for _, nfsShare := range filesystemResp.FileContent.NFSShare {
if nfsShare.Name == nfsShareName {
@@ -542,7 +808,10 @@ func (s *service) ControllerPublishVolume(
}
//Allocate host access to NFS Share with appropriate access mode
- nfsShareResp, _ := fileAPI.FindNFSShareById(ctx, nfsShareID)
+ nfsShareResp, err := fileAPI.FindNFSShareById(ctx, nfsShareID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find NFS Share: %s failed. Error: %v", nfsShareID, err))
+ }
readOnlyHosts := nfsShareResp.NFSShareContent.ReadOnlyHosts
readWriteHosts := nfsShareResp.NFSShareContent.ReadWriteHosts
readOnlyRootHosts := nfsShareResp.NFSShareContent.ReadOnlyRootAccessHosts
@@ -607,15 +876,24 @@ func (s *service) ControllerPublishVolume(
}
if am.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
readHostIDList = append(readHostIDList, hostID)
- err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readHostIDList, gounity.ReadOnlyRootAccessType)
+ if isSnapshot {
+ err = fileAPI.ModifyNFSShareCreatedFromSnapshotHostAccess(ctx, nfsShareID, readHostIDList, gounity.ReadOnlyRootAccessType)
+ } else {
+ err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readHostIDList, gounity.ReadOnlyRootAccessType)
+ }
} else {
readWriteHostIDList = append(readWriteHostIDList, hostID)
- err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readWriteHostIDList, gounity.ReadWriteRootAccessType)
+ if isSnapshot {
+ err = fileAPI.ModifyNFSShareCreatedFromSnapshotHostAccess(ctx, nfsShareID, readWriteHostIDList, gounity.ReadWriteRootAccessType)
+ } else {
+ err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readWriteHostIDList, gounity.ReadWriteRootAccessType)
+ }
}
if err != nil {
return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Allocating host %s access to NFS Share failed. Error: %v", nodeID, err))
}
log.Debugf("NFS Share: %s is accessible to host: %s with access mode: %s", nfsShareID, nodeID, am.Mode)
+ log.Debugf("ControllerPublishVolume successful for volid: [%s]", req.GetVolumeId())
return &csi.ControllerPublishVolumeResponse{PublishContext: pinfo}, nil
}
}
@@ -673,28 +951,60 @@ func (s *service) ControllerUnpublishVolume(
} else {
log.Info(fmt.Sprintf("The given Node %s does not have access on the given volume %s. Already in Unpublished state.", hostID, volID))
}
-
+ log.Debugf("ControllerUnpublishVolume successful for volid: [%s]", req.GetVolumeId())
return &csi.ControllerUnpublishVolumeResponse{}, nil
} else {
fileAPI := gounity.NewFilesystem(unity)
+ isSnapshot := false
filesystem, err := fileAPI.FindFilesystemById(ctx, volID)
+ var snapResp *types.Snapshot
if err != nil {
- // If the filesysten isn't found, k8s will retry Controller Unpublish forever so...
- // There is no way back if filesystem isn't found and so considering this scenario idempotent
- if err == gounity.FilesystemNotFoundError {
- log.Debugf("Filesystem %s not found on the array %s during Controller Unpublish. Hence considering the call to be idempotent", volID, arrayId)
- return &csi.ControllerUnpublishVolumeResponse{}, nil
+ snapshotApi := gounity.NewSnapshot(unity)
+ snapResp, err = snapshotApi.FindSnapshotById(ctx, volID)
+ if err != nil {
+ // If the filesystem isn't found, k8s will retry Controller Unpublish forever, and
+ // there is no way back once it is gone, so this scenario is considered idempotent
+ if err == gounity.FilesystemNotFoundError || err == gounity.SnapshotNotFoundError {
+ log.Debugf("Filesystem %s not found on the array %s during Controller Unpublish. Hence considering the call to be idempotent", volID, arrayId)
+ return &csi.ControllerUnpublishVolumeResponse{}, nil
+ }
+ return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Find filesystem %s failed with error: %v", volID, err))
+ }
+ isSnapshot = true
+ filesystem, err = s.getFilesystemByResourceID(ctx, snapResp.SnapshotContent.StorageResource.Id, arrayId)
+ if err != nil {
+ return nil, err
}
- return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "%v", err))
}
//Remove host access from NFS Share
nfsShareName := NFSShareNamePrefix + filesystem.FileContent.Name
+ if isSnapshot {
+ nfsShareName = NFSShareNamePrefix + snapResp.SnapshotContent.Name
+ }
shareExists := false
+ deleteShare := true
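+ //deleteShare remains true only for shares created by the driver; manually created shares are left on the array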
var nfsShareID string
for _, nfsShare := range filesystem.FileContent.NFSShare {
- if nfsShare.Name == nfsShareName {
- shareExists = true
- nfsShareID = nfsShare.Id
+ if isSnapshot {
+ if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.Id == volID {
+ shareExists = true
+ if nfsShare.Name != nfsShareName {
+ //The share was created manually on the array, so don't delete it via the driver
+ deleteShare = false
+ nfsShareName = nfsShare.Name
+ }
+ nfsShareID = nfsShare.Id
+ }
+ } else {
+ if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.Id == "" {
+ shareExists = true
+ if nfsShare.Name != nfsShareName {
+ //The share was created manually on the array, so don't delete it via the driver
+ deleteShare = false
+ nfsShareName = nfsShare.Name
+ }
+ nfsShareID = nfsShare.Id
+ }
}
}
if !shareExists {
@@ -702,7 +1012,10 @@ func (s *service) ControllerUnpublishVolume(
return &csi.ControllerUnpublishVolumeResponse{}, nil
}
- nfsShareResp, _ := fileAPI.FindNFSShareById(ctx, nfsShareID)
+ nfsShareResp, err := fileAPI.FindNFSShareById(ctx, nfsShareID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find NFS Share: %s failed. Error: %v", nfsShareID, err))
+ }
readOnlyHosts := nfsShareResp.NFSShareContent.ReadOnlyHosts
readWriteHosts := nfsShareResp.NFSShareContent.ReadWriteHosts
readOnlyRootHosts := nfsShareResp.NFSShareContent.ReadOnlyRootAccessHosts
@@ -754,9 +1067,17 @@ func (s *service) ControllerUnpublishVolume(
return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Cannot remove host access. Host: %s has access on NFS Share: %s with incompatible access mode.", nodeID, nfsShareID))
}
if foundReadOnly {
- err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readHostIDList, gounity.ReadOnlyRootAccessType)
+ if isSnapshot {
+ err = fileAPI.ModifyNFSShareCreatedFromSnapshotHostAccess(ctx, nfsShareID, readHostIDList, gounity.ReadOnlyRootAccessType)
+ } else {
+ err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readHostIDList, gounity.ReadOnlyRootAccessType)
+ }
} else if foundReadWrite {
- err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readWriteHostIDList, gounity.ReadWriteRootAccessType)
+ if isSnapshot {
+ err = fileAPI.ModifyNFSShareCreatedFromSnapshotHostAccess(ctx, nfsShareID, readWriteHostIDList, gounity.ReadWriteRootAccessType)
+ } else {
+ err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readWriteHostIDList, gounity.ReadWriteRootAccessType)
+ }
} else {
//Idempotent case
log.Infof("Host: %s has no access on NFS Share: %s", nodeID, nfsShareID)
@@ -767,16 +1088,21 @@ func (s *service) ControllerUnpublishVolume(
log.Debugf("Host: %s access is removed from NFS Share: %s", nodeID, nfsShareID)
//Delete NFS Share
- if otherHostsWithAccess > 0 {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "NFS Share: %s can not be deleted as other hosts have access on it.", nfsShareID))
- }
-
- err = fileAPI.DeleteNFSShare(ctx, filesystem.FileContent.Id, nfsShareID)
- if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Delete NFS Share: %s Failed with error: %v", nfsShareID, err))
+ if deleteShare {
+ if otherHostsWithAccess > 0 {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "NFS Share: %s can not be deleted as other hosts have access on it.", nfsShareID))
+ }
+ if isSnapshot {
+ err = fileAPI.DeleteNFSShareCreatedFromSnapshot(ctx, nfsShareID)
+ } else {
+ err = fileAPI.DeleteNFSShare(ctx, filesystem.FileContent.Id, nfsShareID)
+ }
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Delete NFS Share: %s Failed with error: %v", nfsShareID, err))
+ }
+ log.Debugf("NFS Share: %s deleted successfully.", nfsShareID)
}
-
- log.Debugf("NFS Share: %s deleted successfully.", nfsShareID)
+ log.Debugf("ControllerUnpublishVolume successful for volid: [%s]", req.GetVolumeId())
return &csi.ControllerUnpublishVolumeResponse{}, nil
}
}
@@ -847,48 +1173,22 @@ func (s *service) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotReq
}
//Source volume is for volume clone or snapshot clone
- volId, protocol, arrayId, unity, err := s.validateAndGetResourceDetails(ctx, req.SourceVolumeId, volumeType)
+ volId, protocol, arrayId, _, err := s.validateAndGetResourceDetails(ctx, req.SourceVolumeId, volumeType)
if err != nil {
return nil, err
}
+
ctx, log = setArrayIdContext(ctx, arrayId)
if err := s.requireProbe(ctx, arrayId); err != nil {
return nil, err
}
- var filesystem *types.Filesystem
- fileAPI := gounity.NewFilesystem(unity)
- var sourceStorageResId string
- if protocol == NFS {
- filesystem, err = fileAPI.FindFilesystemById(ctx, volId)
- if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find source filesystem: %s Failed. Error: %v ", volId, err))
- }
- sourceStorageResId = filesystem.FileContent.StorageResource.Id
- } else {
- sourceStorageResId = volId
- }
-
- snapApi := gounity.NewSnapshot(unity)
- //Idempotenc check
- snap, _ := snapApi.FindSnapshotByName(ctx, req.Name)
- if snap != nil {
- if snap.SnapshotContent.StorageResource.Id == sourceStorageResId {
- log.Infof("Snapshot already exists with same name %s for same storage resource %s", req.Name, req.SourceVolumeId)
- return utils.GetSnapshotResponseFromSnapshot(snap, protocol, arrayId), nil
- }
- return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Snapshot with same name %s already exists for storage resource %s", req.Name, snap.SnapshotContent.StorageResource.Id))
- }
- newSnapshot, err := snapApi.CreateSnapshot(ctx, sourceStorageResId, req.Name, req.Parameters["description"], req.Parameters["retentionDuration"])
+ //Create the snapshot; createIdempotentSnapshot returns the existing snapshot if one already matches
+ snap, err := s.createIdempotentSnapshot(ctx, req.Name, volId, req.Parameters["description"], req.Parameters["retentionDuration"], protocol, arrayId, false)
if err != nil {
- return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Create Snapshot error: %v", err))
- }
- newSnapshot, _ = snapApi.FindSnapshotByName(ctx, req.Name)
- if newSnapshot != nil {
- return utils.GetSnapshotResponseFromSnapshot(newSnapshot, protocol, arrayId), nil
- } else {
- return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Find Snapshot error after create. %v", err))
+ return nil, err
}
+ return utils.GetSnapshotResponseFromSnapshot(snap, protocol, arrayId), nil
}
func (s *service) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
@@ -920,11 +1220,59 @@ func (s *service) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotReq
}
delSnapResponse := &csi.DeleteSnapshotResponse{}
+ log.Debugf("Delete snapshot successful [%s]", req.SnapshotId)
return delSnapResponse, nil
}
func (s *service) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
- return nil, status.Error(codes.Unimplemented, "ListSnapshots is not implemented")
+ ctx, log, rid := GetRunidLog(ctx)
+ log.Infof("Executing ListSnapshot with args: %+v", *req)
+
+ var (
+ startToken int
+ err error
+ maxEntries = int(req.MaxEntries)
+ )
+ snapId, protocol, arrayId, unity, err := s.validateAndGetResourceDetails(ctx, req.SnapshotId, snapshotType)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, log = setArrayIdContext(ctx, arrayId)
+ if err := s.requireProbe(ctx, arrayId); err != nil {
+ return nil, err
+ }
+
+ snapApi := gounity.NewSnapshot(unity)
+
+ //Limit the number of snapshots per response to MAX_ENTRIES_SNAPSHOT (100) to avoid timeout issues
+ if maxEntries > MAX_ENTRIES_SNAPSHOT || maxEntries == 0 {
+ maxEntries = MAX_ENTRIES_SNAPSHOT
+ }
+
+ if req.StartingToken != "" {
+ i, err := strconv.ParseInt(req.StartingToken, 10, 64)
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Unable to parse StartingToken: %v into an integer", req.StartingToken))
+ }
+ startToken = int(i)
+ }
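+ //The nextToken returned by the array is passed back as StartingToken for the next ListSnapshots call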
+
+ snaps, nextToken, err := snapApi.ListSnapshots(ctx, startToken, maxEntries, "", snapId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Unable to get the snapshots: %v", err))
+ }
+
+ // Process the source snapshots and make CSI Snapshot
+ entries, err := s.getCSISnapshots(snaps, req.SourceVolumeId, protocol, arrayId)
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, err.Error()))
+ }
+ log.Debugf("ListSnapshot successful for snapid: [%s]", req.SnapshotId)
+ return &csi.ListSnapshotsResponse{
+ Entries: entries,
+ NextToken: strconv.Itoa(nextToken),
+ }, nil
}
func (s *service) controllerProbe(ctx context.Context, arrayId string) error {
@@ -935,7 +1283,7 @@ func (s *service) controllerProbe(ctx context.Context, arrayId string) error {
// Default supports all capabilities
func (cs *service) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
ctx, log, _ := GetRunidLog(ctx)
- log.Debug("Executing ControllerGetCapabilities with args: %+v", *req)
+ log.Debugf("Executing ControllerGetCapabilities with args: %+v", *req)
return &csi.ControllerGetCapabilitiesResponse{
Capabilities: []*csi.ControllerServiceCapability{
&csi.ControllerServiceCapability{
@@ -980,6 +1328,20 @@ func (cs *service) ControllerGetCapabilities(ctx context.Context, req *csi.Contr
},
},
},
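+ //Volume expansion and volume cloning capabilities are new in this release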
+ &csi.ControllerServiceCapability{
+ Type: &csi.ControllerServiceCapability_Rpc{
+ Rpc: &csi.ControllerServiceCapability_RPC{
+ Type: csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
+ },
+ },
+ },
+ &csi.ControllerServiceCapability{
+ Type: &csi.ControllerServiceCapability_Rpc{
+ Rpc: &csi.ControllerServiceCapability_RPC{
+ Type: csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
+ },
+ },
+ },
},
}, nil
}
@@ -992,10 +1354,11 @@ func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.Controlle
return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "volumeId is mandatory parameter"))
}
- volId, _, arrayId, unity, err := s.validateAndGetResourceDetails(ctx, req.VolumeId, volumeType)
+ volId, protocol, arrayId, unity, err := s.validateAndGetResourceDetails(ctx, req.VolumeId, volumeType)
if err != nil {
return nil, err
}
+
ctx, log = setArrayIdContext(ctx, arrayId)
if err := s.requireProbe(ctx, arrayId); err != nil {
return nil, err
@@ -1011,41 +1374,86 @@ func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.Controlle
}
}
if capacity <= 0 {
- return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "required bytes can not be 0 or less"))
+ return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Required bytes can not be 0 or less"))
}
- volumeApi := gounity.NewVolume(unity)
- //Idempotency check
- volume, err := volumeApi.FindVolumeById(ctx, volId)
- if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "unable to find the volume"))
+ expandVolumeResp := &csi.ControllerExpandVolumeResponse{
+ CapacityBytes: capacity,
}
- if volume.VolumeContent.SizeTotal > uint64(capacity) {
- return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "requested new capacity smaller than existing capacity"))
- }
+ if protocol == NFS {
+ //Adding Additional size used for metadata
+ capacity += AdditionalFilesystemSize
+ filesystemApi := gounity.NewFilesystem(unity)
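+ //Cloned filesystems are backed by array snapshots and cannot be expanded; detect that case and return Unimplemented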
- volumeResp := &csi.ControllerExpandVolumeResponse{
- CapacityBytes: capacity,
- }
- if volume.VolumeContent.SizeTotal == uint64(capacity) {
- log.Infof("New Volume size (%d) is same as existing Volume size. Ignoring expand volume operation.", volume.VolumeContent.SizeTotal)
- volumeResp.NodeExpansionRequired = false
- return volumeResp, nil
- }
+ filesystem, err := filesystemApi.FindFilesystemById(ctx, volId)
+ if err != nil {
+ snapshotApi := gounity.NewSnapshot(unity)
+ _, err = snapshotApi.FindSnapshotById(ctx, volId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find filesystem %s failed with error: %v", volId, err))
+ }
+ return nil, status.Error(codes.Unimplemented, utils.GetMessageWithRunID(rid, "Expand Volume not supported for cloned filesystems (snapshot on array)"))
+ }
- err = volumeApi.ExpandVolume(ctx, volId, uint64(capacity))
- if err != nil {
- return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "unable to expand volume. Error %v", err))
- }
+ //Idempotency check
+ if filesystem.FileContent.SizeTotal >= uint64(capacity) {
+ log.Infof("New Filesystem size (%d) is same as existing Filesystem size. Ignoring expand volume operation.", filesystem.FileContent.SizeTotal)
+ expandVolumeResp := &csi.ControllerExpandVolumeResponse{
+ CapacityBytes: 0,
+ }
+ expandVolumeResp.NodeExpansionRequired = false
+ return expandVolumeResp, nil
+ }
- volume, err = volumeApi.FindVolumeById(ctx, volId)
- if err != nil {
- return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "unable to find the volume"))
+ err = filesystemApi.ExpandFilesystem(ctx, volId, uint64(capacity))
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Expand filesystem failed with error: %v", err))
+ }
+
+ filesystem, err = filesystemApi.FindFilesystemById(ctx, volId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find filesystem failed with error: %v", err))
+ }
+ expandVolumeResp.CapacityBytes = int64(filesystem.FileContent.SizeTotal) - AdditionalFilesystemSize
+ expandVolumeResp.NodeExpansionRequired = false
+ return expandVolumeResp, err
+ } else {
+ volumeApi := gounity.NewVolume(unity)
+ //Idempotency check
+ volume, err := volumeApi.FindVolumeById(ctx, volId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find volume failed with error: %v", err))
+ }
+
+ nodeExpansionRequired := false
+ content := volume.VolumeContent
+ if len(content.HostAccessResponse) >= 1 { //Node expansion is required only when the volume is attached to at least one host
+ nodeExpansionRequired = true
+ }
+
+ if volume.VolumeContent.SizeTotal >= uint64(capacity) {
+ log.Infof("New Volume size (%d) is same as existing Volume size. Ignoring expand volume operation.", volume.VolumeContent.SizeTotal)
+ expandVolumeResp := &csi.ControllerExpandVolumeResponse{
+ CapacityBytes: 0,
+ }
+ expandVolumeResp.NodeExpansionRequired = nodeExpansionRequired
+ return expandVolumeResp, nil
+ }
+
+ err = volumeApi.ExpandVolume(ctx, volId, uint64(capacity))
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Expand volume failed with error: %v", err))
+ }
+
+ volume, err = volumeApi.FindVolumeById(ctx, volId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find volume failed with error: %v", err))
+ }
+ expandVolumeResp.CapacityBytes = int64(volume.VolumeContent.SizeTotal)
+ expandVolumeResp.NodeExpansionRequired = nodeExpansionRequired
+ return expandVolumeResp, err
}
- volumeResp.CapacityBytes = int64(volume.VolumeContent.SizeTotal)
- volumeResp.NodeExpansionRequired = true
- return volumeResp, err
}
func (s *service) getCSIVolumes(volumes []types.Volume) ([]*csi.ListVolumesResponse_Entry, error) {
@@ -1073,7 +1481,7 @@ func (s *service) getCSIVolumes(volumes []types.Volume) ([]*csi.ListVolumesRespo
return entries, nil
}
-func (s *service) getCSISnapshots(snaps []types.Snapshot) ([]*csi.ListSnapshotsResponse_Entry, error) {
+func (s *service) getCSISnapshots(snaps []types.Snapshot, volId, protocol, arrayId string) ([]*csi.ListSnapshotsResponse_Entry, error) {
entries := make([]*csi.ListSnapshotsResponse_Entry, len(snaps))
for i, snap := range snaps {
isReady := false
@@ -1085,11 +1493,17 @@ func (s *service) getCSISnapshots(snaps []types.Snapshot) ([]*csi.ListSnapshotsR
timestamp, _ = ptypes.TimestampProto(snap.SnapshotContent.CreationTime)
}
+ snapId := fmt.Sprintf("%s-%s-%s-%s", snap.SnapshotContent.Name, protocol, arrayId, snap.SnapshotContent.ResourceId)
+
+ size := snap.SnapshotContent.Size
+ if protocol == NFS {
+ size -= AdditionalFilesystemSize
+ }
//Create CSI Snapshot
vi := &csi.Snapshot{
- SizeBytes: snap.SnapshotContent.Size,
- SnapshotId: snap.SnapshotContent.ResourceId,
- SourceVolumeId: snap.SnapshotContent.StorageResource.Id,
+ SizeBytes: size,
+ SnapshotId: snapId,
+ SourceVolumeId: volId,
CreationTime: timestamp,
ReadyToUse: isReady,
}
@@ -1113,3 +1527,121 @@ func (s *service) validateAndGetProtocol(ctx context.Context, protocol, scProtoc
}
return protocol, nil
}
+
+func (s *service) getFilesystemByResourceID(ctx context.Context, resourceID, arrayID string) (*types.Filesystem, error) {
+ ctx, _, rid := GetRunidLog(ctx)
+ unity, err := s.getUnityClient(ctx, arrayID)
+ if err != nil {
+ return nil, err
+ }
+ filesystemAPI := gounity.NewFilesystem(unity)
+
+ filesystemID, err := filesystemAPI.GetFilesystemIdFromResId(ctx, resourceID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Filesystem Id not found for storage resource: %s. Error: %v", resourceID, err))
+ }
+ sourceFilesystemResp, err := filesystemAPI.FindFilesystemById(ctx, filesystemID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Filesystem: %s not found. Error: %v", filesystemID, err))
+ }
+ return sourceFilesystemResp, nil
+}
+
+//Create Volume from Snapshot (copies the snapshot on the array)
+func (s *service) createFilesystemFromSnapshot(ctx context.Context, snapID, volumeName, arrayID string) (*types.Snapshot, error) {
+ ctx, _, rid := GetRunidLog(ctx)
+ unity, err := s.getUnityClient(ctx, arrayID)
+ if err != nil {
+ return nil, err
+ }
+ snapshotAPI := gounity.NewSnapshot(unity)
+
+ snapResp, err := snapshotAPI.CopySnapshot(ctx, snapID, volumeName)
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Create Filesystem from snapshot failed with error: %v", err))
+ }
+
+ snapResp, err = snapshotAPI.FindSnapshotByName(ctx, volumeName)
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Find snapshot %s after copy failed with error: %v", volumeName, err))
+ }
+
+ return snapResp, nil
+}
+
+func (s *service) createIdempotentSnapshot(ctx context.Context, snapshotName, sourceVolID, description, retentionDuration, protocol, arrayID string, isClone bool) (*types.Snapshot, error) {
+ ctx, log, rid := GetRunidLog(ctx)
+ unity, err := s.getUnityClient(ctx, arrayID)
+ if err != nil {
+ return nil, err
+ }
+ snapshotAPI := gounity.NewSnapshot(unity)
+
+ isSnapshot := false
+ var snapResp *types.Snapshot
+ var filesystemResp *types.Filesystem
+ if protocol == NFS {
+ fileAPI := gounity.NewFilesystem(unity)
+ filesystemResp, err = fileAPI.FindFilesystemById(ctx, sourceVolID)
+ if err != nil {
+ snapResp, err = snapshotAPI.FindSnapshotById(ctx, sourceVolID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find source filesystem: %s failed with error: %v", sourceVolID, err))
+ }
+ isSnapshot = true
+ filesystemResp, err = s.getFilesystemByResourceID(ctx, snapResp.SnapshotContent.StorageResource.Id, arrayID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if protocol == NFS && !isSnapshot {
+ sourceVolID = filesystemResp.FileContent.StorageResource.Id
+ }
+
+ snap, _ := snapshotAPI.FindSnapshotByName(ctx, snapshotName)
+ if snap != nil {
+ if snap.SnapshotContent.StorageResource.Id == sourceVolID || (isSnapshot && snap.SnapshotContent.StorageResource.Id == filesystemResp.FileContent.StorageResource.Id) {
+ //Subtract AdditionalFilesystemSize for Filesystem snapshots
+ if protocol == NFS {
+ snap.SnapshotContent.Size -= AdditionalFilesystemSize
+ }
+ log.Infof("Snapshot already exists with same name %s for same storage resource %s", snapshotName, sourceVolID)
+ return snap, nil
+ }
+ return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Snapshot with same name %s already exists for storage resource %s", snapshotName, snap.SnapshotContent.StorageResource.Id))
+ }
+
+ var newSnapshot *types.Snapshot
+ if isSnapshot {
+ newSnapshot, err = snapshotAPI.CopySnapshot(ctx, sourceVolID, snapshotName)
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Create Snapshot error: %v", err))
+ }
+ err = snapshotAPI.ModifySnapshot(ctx, newSnapshot.SnapshotContent.ResourceId, description, retentionDuration)
+ if err != nil {
+ log.Infof("Unable to modify description and retention duration in created snapshot %s. Error: %s", newSnapshot.SnapshotContent.ResourceId, err)
+ }
+ } else {
+ if isClone {
+ newSnapshot, err = snapshotAPI.CreateSnapshotWithFsAccesType(ctx, sourceVolID, snapshotName, description, retentionDuration, gounity.ProtocolAccessType)
+ } else {
+ newSnapshot, err = snapshotAPI.CreateSnapshot(ctx, sourceVolID, snapshotName, description, retentionDuration)
+ }
+ if err != nil {
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Create Snapshot error: %v", err))
+ }
+ }
+
+ newSnapshot, _ = snapshotAPI.FindSnapshotByName(ctx, snapshotName)
+ if newSnapshot != nil {
+ //Subtract AdditionalFilesystemSize for Filesystem snapshots
+ if protocol == NFS {
+ newSnapshot.SnapshotContent.Size -= AdditionalFilesystemSize
+ }
+ return newSnapshot, nil
+ }
+ return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Find Snapshot after create failed with error: %v", err))
+}
diff --git a/service/controller_test.go b/service/controller_test.go
index 76c77740..677cffad 100644
--- a/service/controller_test.go
+++ b/service/controller_test.go
@@ -1,18 +1,26 @@
package service
import (
+ "context"
+ "github.com/dell/csi-unity/service/utils"
+ "github.com/stretchr/testify/assert"
"testing"
)
func TestControllerProbe(t *testing.T) {
- //DriverConfig = testConf.unityConfig
- //fmt.Println("----", testConf.unityConfig)
- //config, err := getDriverConfig(context.Background())
- //if err != nil {
- // t.Fatalf("TestBeforeServe failed with error %v", err)
- //}
- //
- //if len(config) == 0 {
- // t.Fatalf("Credentials are empty")
- //}
+ DriverConfig = testConf.unityConfig
+ err := testConf.service.syncDriverConfig(context.Background())
+ if err != nil {
+ t.Fatalf("TestBeforeServe failed with error %v", err)
+ }
+ if testConf.service.getStorageArrayLength() == 0 {
+ t.Fatalf("Credentials are empty")
+ }
+ log := utils.GetLogger()
+ ctx := context.Background()
+ entry := log.WithField(utils.RUNID, "1111")
+ ctx = context.WithValue(ctx, utils.UnityLogger, entry)
+
+ err = testConf.service.probe(ctx, "controller", "")
+ assert.True(t, err != nil, "expected probe to return an error")
}
diff --git a/service/identity.go b/service/identity.go
index b2b9cc41..bb28427b 100644
--- a/service/identity.go
+++ b/service/identity.go
@@ -13,16 +13,19 @@ func (s *service) Probe(
*csi.ProbeResponse, error) {
ctx, log, _ := GetRunidLog(ctx)
log.Infof("Executing Probe with args: %+v", *req)
- if !strings.EqualFold(s.mode, "node") {
+ if strings.EqualFold(s.mode, "controller") {
if err := s.controllerProbe(ctx, ""); err != nil {
+ log.Error("Identity probe failed:", err)
return nil, err
}
}
- if !strings.EqualFold(s.mode, "controller") {
+ if strings.EqualFold(s.mode, "node") {
if err := s.nodeProbe(ctx, ""); err != nil {
+ log.Error("Identity probe failed:", err)
return nil, err
}
}
+ log.Info("Identity probe success")
return &csi.ProbeResponse{}, nil
}
@@ -53,6 +56,20 @@ func (s *service) GetPluginCapabilities(
},
},
},
+ {
+ Type: &csi.PluginCapability_VolumeExpansion_{
+ VolumeExpansion: &csi.PluginCapability_VolumeExpansion{
+ Type: csi.PluginCapability_VolumeExpansion_ONLINE,
+ },
+ },
+ },
+ {
+ Type: &csi.PluginCapability_VolumeExpansion_{
+ VolumeExpansion: &csi.PluginCapability_VolumeExpansion{
+ Type: csi.PluginCapability_VolumeExpansion_OFFLINE,
+ },
+ },
+ },
},
}, nil
}
diff --git a/service/mount.go b/service/mount.go
index deb96433..8fa6b01e 100644
--- a/service/mount.go
+++ b/service/mount.go
@@ -32,6 +32,9 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor
accMode := req.GetVolumeCapability().GetAccessMode()
+ volCap := req.GetVolumeCapability()
+ mntVol := volCap.GetMount()
+ mntFlags := mntVol.GetMountFlags()
// make sure target is created
err := createDirIfNotExist(ctx, stagingTargetPath, arrayId)
if err != nil {
@@ -42,6 +45,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor
if accMode.GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
rwo = "ro"
}
+ mntFlags = append(mntFlags, rwo)
mnts, err := gofsutil.GetMounts(ctx)
if err != nil {
@@ -74,7 +78,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor
if nfsv4 {
nfsv4 = false
for _, exportPathURL := range exportPaths {
- err = gofsutil.Mount(ctx, exportPathURL, stagingTargetPath, "nfs", rwo)
+ err = gofsutil.Mount(ctx, exportPathURL, stagingTargetPath, "nfs", mntFlags...)
if err == nil {
nfsv4 = true
break
@@ -85,7 +89,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor
if !nfsv4 && nfsv3 {
rwo += ",vers=3"
for _, exportPathURL := range exportPaths {
- err = gofsutil.Mount(ctx, exportPathURL, stagingTargetPath, "nfs", rwo)
+ err = gofsutil.Mount(ctx, exportPathURL, stagingTargetPath, "nfs", append(mntFlags, "vers=3")...)
if err == nil {
break
}
@@ -99,7 +103,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor
return nil
}
-func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPaths []string, arrayId string, nfsv3, nfsv4 bool) error {
+func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPaths []string, arrayId, chroot string, nfsv3, nfsv4 bool) error {
ctx, log, rid := GetRunidLog(ctx)
ctx, log = setArrayIdContext(ctx, arrayId)
@@ -120,7 +124,9 @@ func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPa
rwo = "ro"
}
rwoArray = append(rwoArray, rwo)
-
+ mntVol := volCap.GetMount()
+ mntFlags := mntVol.GetMountFlags()
+ rwoArray = append(rwoArray, mntFlags...)
//Check if stage target mount exists
var stageExportPathURL string
stageMountExists := false
@@ -164,7 +170,7 @@ func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPa
} else {
return status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Target path: %s is already mounted to export path: %s with conflicting access modes", targetPath, stageExportPathURL))
}
- } else if m.Path == stagingTargetPath {
+ } else if m.Path == stagingTargetPath || m.Path == chroot+stagingTargetPath {
continue
} else {
if accMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
@@ -261,6 +267,10 @@ func stageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest, stagingPa
return status.Error(codes.Unavailable, utils.GetMessageWithRunID(rid, "Fs type %s not supported", fs))
}
+ if fs == "xfs" {
+ mntFlags = append(mntFlags, "nouuid")
+ }
+
if err := handleStageMount(ctx, mntFlags, sysDevice, fs, stagingPath); err != nil {
return status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Staging mount failed: %v", err))
}
@@ -298,7 +308,7 @@ func stageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest, stagingPa
return nil
}
-func publishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, targetPath, symlinkPath string) error {
+func publishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, targetPath, symlinkPath, chroot string) error {
rid, log := utils.GetRunidAndLogger(ctx)
stagingPath := req.GetStagingTargetPath()
@@ -336,7 +346,7 @@ func publishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, targe
// Existing mount satisfies request
log.Debug("volume already published to target")
return nil
- } else if m.Path == stagingPath {
+ } else if m.Path == stagingPath || m.Path == chroot+stagingPath {
continue
} else {
//Device has been mounted aleady to another target
@@ -451,7 +461,7 @@ func unpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) e
}
//unstage volume removes staging mount and makes sure no other mounts are left for the given device path
-func unstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest, deviceWWN string) (bool, string, error) {
+func unstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest, deviceWWN, chroot string) (bool, string, error) {
rid, log := utils.GetRunidAndLogger(ctx)
lastUnmounted := false
id := req.GetVolumeId()
@@ -518,20 +528,20 @@ func unstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest, devic
for _, m := range mnts {
if m.Source == sysDevice.FullPath || m.Device == sysDevice.FullPath {
- if m.Path == stagingTarget {
+ if m.Path == stagingTarget || m.Path == chroot+stagingTarget {
stgMnt = true
break
} else {
log.Infof("Device %s has been mounted outside staging target on %s", sysDevice.FullPath, m.Path)
}
- } else if m.Path == stagingTarget && !(m.Source == sysDevice.FullPath || m.Device == sysDevice.FullPath) {
+ } else if (m.Path == stagingTarget || m.Path == chroot+stagingTarget) && !(m.Source == sysDevice.FullPath || m.Device == sysDevice.FullPath) {
log.Infof("Staging path %s has been mounted by foreign device %s", stagingTarget, m.Device)
}
}
if stgMnt {
log.Debugf("Unmount sysDevice: %v staging target: %s", sysDevice, stagingTarget)
- if lastUnmounted, err = unmountStagingMount(ctx, sysDevice, stagingTarget); err != nil {
+ if lastUnmounted, err = unmountStagingMount(ctx, sysDevice, stagingTarget, chroot); err != nil {
return lastUnmounted, "", status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Error unmounting staging mount %s: %s", stagingTarget, err.Error()))
}
log.Debugf("Device %s unmounted from private mount path %s successfully", sysDevice.Name, stagingTarget)
@@ -701,7 +711,7 @@ func GetDevice(ctx context.Context, path string) (*Device, error) {
func unmountStagingMount(
ctx context.Context,
dev *Device,
- target string) (bool, error) {
+ target, chroot string) (bool, error) {
log := utils.GetRunidLogger(ctx)
lastUnmounted := false
@@ -722,7 +732,8 @@ func unmountStagingMount(
}
// remove private mount if we can (if there are no other mounts
- if len(mnts) == 1 && mnts[0].Path == target {
+ // mnts length will be 1 on CoreOS and 2 on other operating systems
+ if (len(mnts) == 1 || len(mnts) == 2) && (mnts[0].Path == target || mnts[0].Path == chroot+target) {
if err := gofsutil.Unmount(ctx, target); err != nil {
return lastUnmounted, err
}
diff --git a/service/node.go b/service/node.go
index 9779644d..46f2fc12 100644
--- a/service/node.go
+++ b/service/node.go
@@ -28,6 +28,7 @@ import (
var (
targetMountRecheckSleepTime = 3 * time.Second
disconnectVolumeRetryTime = 1 * time.Second
+ nodeStartTimeout = 3 * time.Second
lunzMutex sync.Mutex
LUNZHLU = 0
nodeMutex sync.Mutex
@@ -172,7 +173,7 @@ func (s *service) NodeStageVolume(
wwn := utils.GetFcPortWwnFromVolumeContentWwn(fcPort.FcPortContent.Wwn)
if !utils.ArrayContains(targetWwns, wwn) {
- log.Debugf("Found Target wwn: ", wwn)
+ log.Debug("Found Target wwn: ", wwn)
targetWwns = append(targetWwns, wwn)
}
}
@@ -185,7 +186,7 @@ func (s *service) NodeStageVolume(
}
}
- log.Debugf("Connect context data: ", publishContextData)
+ log.Debug("Connect context data: ", publishContextData)
devicePath, err := s.connectDevice(ctx, publishContextData, useFC)
if err != nil {
return nil, err
@@ -282,7 +283,7 @@ func (s *service) NodeUnstageVolume(
}
volumeWwn := utils.GetWwnFromVolumeContentWwn(volume.VolumeContent.Wwn)
- lastMounted, devicePath, err := unstageVolume(ctx, req, volumeWwn)
+ lastMounted, devicePath, err := unstageVolume(ctx, req, volumeWwn, s.opts.Chroot)
if err != nil {
return nil, err
}
@@ -383,7 +384,7 @@ func (s *service) NodePublishVolume(
if len(exportPaths) == 0 {
return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Export paths not exist on NFS Share: %s", nfsShare.NFSShareContent.Id))
}
- err = publishNFS(ctx, req, exportPaths, arrayId, nfsv3, nfsv4)
+ err = publishNFS(ctx, req, exportPaths, arrayId, s.opts.Chroot, nfsv3, nfsv4)
if err != nil {
return nil, err
}
@@ -391,10 +392,6 @@ func (s *service) NodePublishVolume(
return &csi.NodePublishVolumeResponse{}, nil
}
- if req.GetReadonly() == true {
- return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "readonly must be false, because the supported mode only SINGLE_NODE_WRITER"))
- }
-
volumeApi := gounity.NewVolume(unity)
volume, err := volumeApi.FindVolumeById(ctx, volID)
if err != nil {
@@ -408,7 +405,7 @@ func (s *service) NodePublishVolume(
return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Disk path not found. Error: %v", err))
}
- if err := publishVolume(ctx, req, targetPath, symlinkPath); err != nil {
+ if err := publishVolume(ctx, req, targetPath, symlinkPath, s.opts.Chroot); err != nil {
return nil, err
}
@@ -592,6 +589,8 @@ func (s *service) NodeGetInfo(
log.Debugf("Executing NodeGetInfo with args: %+v", *req)
atleastOneArraySuccess := false
+ //Sleep for a while and wait until iSCSI discovery is completed
+ time.Sleep(nodeStartTimeout)
for _, array := range s.getStorageArrayList() {
if array.IsHostAdded {
atleastOneArraySuccess = true
@@ -632,6 +631,13 @@ func (s *service) NodeGetCapabilities(
},
},
},
+ {
+ Type: &csi.NodeServiceCapability_Rpc{
+ Rpc: &csi.NodeServiceCapability_RPC{
+ Type: csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
+ },
+ },
+ },
},
}, nil
}
@@ -643,8 +649,103 @@ func (s *service) NodeGetVolumeStats(
return nil, status.Error(codes.Unimplemented, "NodeGetVolumeStats not supported")
}
-func (s *service) NodeExpandVolume(context.Context, *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
- return nil, status.Error(codes.Unimplemented, "NodeExpandVolume not supported")
+func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
+ ctx, log, rid := GetRunidLog(ctx)
+ log.Debugf("Executing NodeExpandVolume with args: %+v", *req)
+
+ if req.VolumeId == "" {
+ return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "volumeId is mandatory parameter"))
+ }
+
+ volID, _, arrayID, unity, err := s.validateAndGetResourceDetails(ctx, req.VolumeId, volumeType)
+ if err != nil {
+ return nil, err
+ }
+
+ size := req.GetCapacityRange().GetRequiredBytes()
+
+ ctx, log = setArrayIdContext(ctx, arrayID)
+ if err := s.requireProbe(ctx, arrayID); err != nil {
+ log.Debug("AutoProbe has not been called. Executing manual probe")
+ err = s.nodeProbe(ctx, arrayID)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // The volume path we receive points to the mount under "/".
+ // It is not used directly here, but we validate it and keep it for tracing.
+ volumePath := req.GetVolumePath()
+ if volumePath == "" {
+ return nil, status.Error(codes.InvalidArgument,
+ utils.GetMessageWithRunID(rid, "Volume path required"))
+ }
+
+ volumeAPI := gounity.NewVolume(unity)
+ volume, err := volumeAPI.FindVolumeById(ctx, volID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find volume Failed %v", err))
+ }
+
+ volName := volume.VolumeContent.Name
+
+ //Locate and fetch all (multipath/regular) mounted paths using this volume
+ devMnt, err := gofsutil.GetMountInfoFromDevice(ctx, volName)
+ if err != nil {
+ return nil, status.Error(codes.Internal,
+ utils.GetMessageWithRunID(rid, "Failed to find mount info for (%s) with error %v", volName, err))
+ }
+
+ log.Debugf("Mount info for volume %s: %+v", volName, devMnt)
+
+ // Rescan the device for the volume expanded on the array
+ for _, device := range devMnt.DeviceNames {
+ log.Debug("Begin rescan for :", device)
+ devicePath := sysBlock + "/" + device
+ err = gofsutil.DeviceRescan(ctx, devicePath)
+ if err != nil {
+ return nil, status.Error(codes.Internal,
+ utils.GetMessageWithRunID(rid, "Failed to rescan device (%s) with error %v", devicePath, err))
+ }
+ }
+ // Expand the filesystem with the actual expanded volume size.
+ if devMnt.MPathName != "" {
+ err = gofsutil.ResizeMultipath(ctx, devMnt.MPathName)
+ if err != nil {
+ return nil, status.Error(codes.Internal,
+ utils.GetMessageWithRunID(rid, "Failed to resize filesystem: device (%s) with error %v", devMnt.MountPoint, err))
+ }
+ }
+ //For a regular device, get the device path (devMnt.DeviceNames[1]) where the filesystem is mounted
+ //PublishVolume creates devMnt.DeviceNames[0], which is left unused for regular devices
+ var devicePath string
+ if len(devMnt.DeviceNames) > 1 {
+ devicePath = "/dev/" + devMnt.DeviceNames[1]
+ } else if len(devMnt.DeviceNames) == 1 {
+ devicePath = "/dev/" + devMnt.DeviceNames[0]
+ } else {
+ return nil, status.Error(codes.Internal,
+ utils.GetMessageWithRunID(rid, "Failed to resize filesystem: device name not found for (%s)", devMnt.MountPoint))
+ }
+
+ fsType, err := gofsutil.FindFSType(ctx, devMnt.MountPoint)
+ if err != nil {
+ return nil, status.Error(codes.Internal,
+ utils.GetMessageWithRunID(rid, "Failed to fetch filesystem for volume (%s) with error %v", devMnt.MountPoint, err))
+ }
+
+ log.Infof("Found %s filesystem mounted on volume %s", fsType, devMnt.MountPoint)
+
+ //Resize the filesystem
+ err = gofsutil.ResizeFS(ctx, devMnt.MountPoint, devicePath, devMnt.MPathName, fsType)
+ if err != nil {
+ return nil, status.Error(codes.Internal,
+ utils.GetMessageWithRunID(rid, "Failed to resize filesystem: mountpoint (%s) device (%s) with error %v",
+ devMnt.MountPoint, devicePath, err))
+ }
+
+ log.Debug("Node Expand completed successfully")
+ return &csi.NodeExpandVolumeResponse{CapacityBytes: size}, nil
}
func (s *service) nodeProbe(ctx context.Context, arrayId string) error {
@@ -656,26 +757,44 @@ func (s *service) getNFSShare(ctx context.Context, filesystemId, arrayId string)
ctx, _, rid := GetRunidLog(ctx)
ctx, _ = setArrayIdContext(ctx, arrayId)
- unity, err := s.getUnityClient(arrayId)
+ unity, err := s.getUnityClient(ctx, arrayId)
if err != nil {
return nil, false, false, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Get Unity client for array %s failed. Error: %v ", arrayId, err))
}
+ isSnapshot := false
fileApi := gounity.NewFilesystem(unity)
filesystem, err := fileApi.FindFilesystemById(ctx, filesystemId)
+ var snapResp *types.Snapshot
if err != nil {
- return nil, false, false, err
+ snapshotApi := gounity.NewSnapshot(unity)
+ snapResp, err = snapshotApi.FindSnapshotById(ctx, filesystemId)
+ if err != nil {
+ return nil, false, false, err
+ }
+ isSnapshot = true
+ filesystem, err = s.getFilesystemByResourceID(ctx, snapResp.SnapshotContent.StorageResource.Id, arrayId)
+ if err != nil {
+ return nil, false, false, err
+ }
}
var nfsShareId string
- nfsShareName := NFSShareNamePrefix + filesystem.FileContent.Name
+
for _, nfsShare := range filesystem.FileContent.NFSShare {
- if nfsShare.Name == nfsShareName {
- nfsShareId = nfsShare.Id
+ if isSnapshot {
+ if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.Id == filesystemId {
+ nfsShareId = nfsShare.Id
+ }
+ } else {
+ if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.Id == "" {
+ nfsShareId = nfsShare.Id
+ }
}
}
+
if nfsShareId == "" {
- return nil, false, false, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "NFS Share: %s not found. Error: %v", nfsShareName, err))
+ return nil, false, false, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "NFS Share for filesystem: %s not found. Error: %v", filesystemId, err))
}
nfsShare, err := fileApi.FindNFSShareById(ctx, nfsShareId)
@@ -699,7 +818,7 @@ func (s *service) getNFSShare(ctx context.Context, filesystemId, arrayId string)
func (s *service) checkFilesystemMapping(ctx context.Context, nfsShare *types.NFSShare, am *csi.VolumeCapability_AccessMode, arrayId string) error {
ctx, _, rid := GetRunidLog(ctx)
ctx, _ = setArrayIdContext(ctx, arrayId)
- unity, err := s.getUnityClient(arrayId)
+ unity, err := s.getUnityClient(ctx, arrayId)
var accessType gounity.AccessType
if err != nil {
return err
@@ -740,7 +859,7 @@ func (s *service) checkVolumeMapping(ctx context.Context, volume *types.Volume,
rid, log := utils.GetRunidAndLogger(ctx)
//Get Host Name
hostName := s.opts.NodeName
- unity, err := s.getUnityClient(arrayId)
+ unity, err := s.getUnityClient(ctx, arrayId)
if err != nil {
return 0, err
}
@@ -819,7 +938,7 @@ func getTargetMount(ctx context.Context, target string) (gofsutil.Info, error) {
func (s *service) getArrayHostInitiators(ctx context.Context, host *types.Host, arrayId string) ([]string, error) {
var hostInitiatorWwns []string
hostContent := host.HostContent
- unity, err := s.getUnityClient(arrayId)
+ unity, err := s.getUnityClient(ctx, arrayId)
if err != nil {
return nil, err
}
@@ -1094,6 +1213,12 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag
if errFc != nil && errIscsi != nil {
log.Infof("Node %s does not have FC or iSCSI initiators and can only be used for NFS exports", s.opts.NodeName)
}
+
+ nodeIps, err := utils.GetHostIP()
+ if err != nil {
+ return status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Unable to get node IP. Error: %v", err))
+ }
+
//Find Host on the Array
host, err := hostApi.FindHostByName(ctx, s.opts.NodeName)
if err != nil {
@@ -1112,6 +1237,12 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag
if err != nil {
return err
}
+ for _, nodeIp := range nodeIps {
+ _, err = hostApi.CreateHostIpPort(ctx, hostContent.ID, nodeIp)
+ if err != nil {
+ return err
+ }
+ }
if len(wwns) > 0 {
//Create Host FC Initiators
@@ -1139,7 +1270,7 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag
return err
}
} else {
- log.Debug("Host %s exists on the array", s.opts.NodeName)
+ log.Debugf("Host %s exists on the array", s.opts.NodeName)
hostContent := host.HostContent
arrayHostWwns, err := s.getArrayHostInitiators(ctx, host, array.ArrayId)
if err != nil {
@@ -1174,25 +1305,40 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag
}
//Check Ip of the host with Host IP Port
- findHostIpPort := false
+ findHostNamePort := false
for _, ipPort := range hostContent.IpPorts {
hostIpPort, err := hostApi.FindHostIpPortById(ctx, ipPort.Id)
if err != nil {
continue
}
if hostIpPort != nil && hostIpPort.HostIpContent.Address == s.opts.LongNodeName {
- findHostIpPort = true
- break
+ findHostNamePort = true
+ continue
+ }
+ if hostIpPort != nil {
+ for i, nodeIp := range nodeIps {
+ if hostIpPort.HostIpContent.Address == nodeIp {
+ nodeIps[i] = nodeIps[len(nodeIps)-1]
+ nodeIps = nodeIps[:len(nodeIps)-1]
+ break
+ }
+ }
}
}
- if findHostIpPort == false {
+ if !findHostNamePort {
//Create Host Ip Port
_, err = hostApi.CreateHostIpPort(ctx, hostContent.ID, s.opts.LongNodeName)
if err != nil {
return err
}
}
+ for _, nodeIp := range nodeIps {
+ _, err = hostApi.CreateHostIpPort(ctx, hostContent.ID, nodeIp)
+ if err != nil {
+ return err
+ }
+ }
}
if len(iqns) > 0 {
diff --git a/service/service.go b/service/service.go
index 63cc5552..1fa75ed8 100644
--- a/service/service.go
+++ b/service/service.go
@@ -251,7 +251,12 @@ func (s *service) getStorageArrayList() []*StorageArrayConfig {
}
// To get the UnityClient for a specific array
-func (s *service) getUnityClient(arrayID string) (*gounity.Client, error) {
+func (s *service) getUnityClient(ctx context.Context, arrayID string) (*gounity.Client, error) {
+ _, _, rid := GetRunidLog(ctx)
+ if s.getStorageArrayLength() == 0 {
+ return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Invalid csi-unity driver configuration: at least one array must be present and the secret must be valid JSON"))
+ }
+
array := s.getStorageArray(arrayID)
if array != nil && array.UnityClient != nil {
return array.UnityClient, nil
@@ -318,7 +323,12 @@ func (s *service) loadDynamicConfig(ctx context.Context, configFile string) erro
}
if event.Op&fsnotify.Create == fsnotify.Create && event.Name == parentFolder+"/..data" {
log.Infof("****************Driver config file modified. Loading the config file:%s****************", event.Name)
- s.syncDriverConfig(ctx)
+ err := s.syncDriverConfig(ctx)
+ if err != nil {
+ log.Debug("Driver configuration array length:", s.getStorageArrayLength())
+ log.Error("Invalid configuration in secret.json. Error:", err)
+ }
if s.mode == "node" {
syncNodeInfoChan <- true
}
@@ -364,6 +374,12 @@ var syncMutex sync.Mutex
func (s *service) syncDriverConfig(ctx context.Context) error {
ctx, log, _ := GetRunidLog(ctx)
log.Info("*************Synchronizing driver config**************")
+ syncMutex.Lock()
+ defer syncMutex.Unlock()
+ s.arrays.Range(func(key interface{}, value interface{}) bool {
+ s.arrays.Delete(key)
+ return true
+ })
configBytes, err := ioutil.ReadFile(DriverConfig)
if err != nil {
return errors.New(fmt.Sprintf("File ('%s') error: %v", DriverConfig, err))
@@ -380,12 +396,6 @@ func (s *service) syncDriverConfig(ctx context.Context) error {
return errors.New("Arrays details are not provided in unity-creds secret")
}
- if len(jsonConfig.StorageArrayList) > 10 {
- return errors.New("Total number of Arrays should be less than or equal to 10 in 'storageArrayList' parameter")
- }
-
- syncMutex.Lock()
- defer syncMutex.Unlock()
s.arrays.Range(func(key interface{}, value interface{}) bool {
s.arrays.Delete(key)
return true
@@ -526,6 +536,8 @@ func GetRunidLog(ctx context.Context) (context.Context, *logrus.Entry, string) {
fields[utils.RUNID] = rid
}
+ logMutex.Lock()
+ defer logMutex.Unlock()
l := utils.GetLogger()
log := l.WithFields(fields)
ctx = context.WithValue(ctx, utils.UnityLogger, log)
@@ -614,6 +626,7 @@ func singleArrayProbe(ctx context.Context, probeType string, array *StorageArray
Password: array.Password,
})
if err != nil {
+ log.Errorf("Unity authentication failed for array %s error: %v", array.ArrayId, err)
if e, ok := status.FromError(err); ok {
if e.Code() == codes.Unauthenticated {
array.IsProbeSuccess = false
@@ -645,6 +658,9 @@ func (s *service) probe(ctx context.Context, probeType string, arrayId string) e
err := singleArrayProbe(ctx, probeType, array)
if err == nil {
atleastOneArraySuccess = true
+ break
+ } else {
+ log.Errorf("Probe failed for array %s error:%v", array, err)
}
}
@@ -658,6 +674,9 @@ func (s *service) probe(ctx context.Context, probeType string, arrayId string) e
func (s *service) validateAndGetResourceDetails(ctx context.Context, resourceContextId string, resourceType resourceType) (resourceId, protocol, arrayId string, unity *gounity.Client, err error) {
ctx, _, rid := GetRunidLog(ctx)
+ if s.getStorageArrayLength() == 0 {
+ return "", "", "", nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Invalid driver csi-driver configuration provided. At least one array should present or invalid json format. "))
+ }
resourceId = getVolumeIdFromVolumeContext(resourceContextId)
if resourceId == "" {
return "", "", "", nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "%sId can't be empty.", resourceType))
@@ -672,7 +691,7 @@ func (s *service) validateAndGetResourceDetails(ctx context.Context, resourceCon
return "", "", "", nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "[%s] [%s] error:[%v]", resourceType, resourceId, err))
}
- unity, err = s.getUnityClient(arrayId)
+ unity, err = s.getUnityClient(ctx, arrayId)
if err != nil {
return "", "", "", nil, err
}
diff --git a/service/utils/emcutils.go b/service/utils/emcutils.go
index fb1ae9b3..41cca740 100644
--- a/service/utils/emcutils.go
+++ b/service/utils/emcutils.go
@@ -10,6 +10,7 @@ import (
"fmt"
"io/ioutil"
"net"
+ "os"
"os/exec"
"strings"
"time"
@@ -33,6 +34,25 @@ func GetVolumeResponseFromFilesystem(filesystem *types.Filesystem, arrayId, prot
return getVolumeResponse(content.Name, protocol, arrayId, content.Id, content.SizeTotal)
}
+func GetVolumeResponseFromSnapshot(snapshot *types.Snapshot, arrayId, protocol string) *csi.CreateVolumeResponse {
+ volId := fmt.Sprintf("%s-%s-%s-%s", snapshot.SnapshotContent.Name, protocol, arrayId, snapshot.SnapshotContent.ResourceId)
+ VolumeContext := make(map[string]string)
+ VolumeContext["protocol"] = protocol
+ VolumeContext["arrayId"] = arrayId
+ VolumeContext["volumeId"] = snapshot.SnapshotContent.ResourceId
+
+ volumeReq := &csi.Volume{
+ VolumeId: volId,
+ CapacityBytes: int64(snapshot.SnapshotContent.Size),
+ VolumeContext: VolumeContext,
+ }
+
+ volumeResp := &csi.CreateVolumeResponse{
+ Volume: volumeReq,
+ }
+ return volumeResp
+}
+
func getVolumeResponse(name, protocol, arrayId, resourceId string, size uint64) *csi.CreateVolumeResponse {
volId := fmt.Sprintf("%s-%s-%s-%s", name, protocol, arrayId, resourceId)
VolumeContext := make(map[string]string)
@@ -109,7 +129,7 @@ func GetFCInitiators(ctx context.Context) ([]string, error) {
}
//Utility method to extract Host IP
-func GetHostIP() (string, error) {
+func GetHostIP() ([]string, error) {
cmd := exec.Command("hostname", "-I")
cmdOutput := &bytes.Buffer{}
cmd.Stdout = cmdOutput
@@ -120,13 +140,28 @@ func GetHostIP() (string, error) {
cmd.Stdout = cmdOutput
err = cmd.Run()
if err != nil {
- return "", err
+ return nil, err
}
}
-
output := string(cmdOutput.Bytes())
- ip := strings.Split(output, " ")[0]
- return ip, nil
+ ips := strings.Split(strings.TrimSpace(output), " ")
+
+ hostname, err := os.Hostname()
+ if err != nil {
+ return nil, err
+ }
+
+ var lookupIPs []string
+ for _, ip := range ips {
+ lookupResp, err := net.LookupAddr(ip)
+ if err == nil && len(lookupResp) > 0 && strings.Contains(lookupResp[0], hostname) {
+ lookupIPs = append(lookupIPs, ip)
+ }
+ }
+ if len(lookupIPs) == 0 {
+ lookupIPs = append(lookupIPs, ips[0])
+ }
+ return lookupIPs, nil
}
//Utility method to convert Unity Rest type Snapshot to CSI standard Snapshot Response
diff --git a/service/validator.go b/service/validator.go
index a3c2bc19..fc181f9c 100644
--- a/service/validator.go
+++ b/service/validator.go
@@ -4,6 +4,9 @@ import (
"fmt"
"github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/dell/csi-unity/service/utils"
+ "github.com/dell/gounity/types"
+ "golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -98,3 +101,41 @@ func valVolumeCaps(vcs []*csi.VolumeCapability, protocol string) (bool, string)
return supported, reason
}
+
+//Validates idempotency of an existing snapshot created from a filesystem
+func validateCreateFsFromSnapshot(ctx context.Context, sourceFilesystemResp *types.Filesystem, storagePool string, tieringPolicy, hostIoSize int64, thin, dataReduction bool) error {
+
+ rid, _ := utils.GetRunidAndLogger(ctx)
+
+ // Validate the storagePool parameter
+ if sourceFilesystemResp.FileContent.Pool.Id != storagePool {
+ return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem storage pool %s is different than the requested storage pool %s",
+ sourceFilesystemResp.FileContent.Pool.Id, storagePool))
+ }
+
+ //Validate the thinProvisioned parameter
+ if sourceFilesystemResp.FileContent.IsThinEnabled != thin {
+ return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem thin provision %v is different than the requested thin provision %v",
+ sourceFilesystemResp.FileContent.IsThinEnabled, thin))
+ }
+
+ //Validate the dataReduction parameter
+ if sourceFilesystemResp.FileContent.IsDataReductionEnabled != dataReduction {
+ return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem data reduction %v is different than the requested data reduction %v",
+ sourceFilesystemResp.FileContent.IsDataReductionEnabled, dataReduction))
+ }
+
+ //Validate the tieringPolicy parameter
+ if int64(sourceFilesystemResp.FileContent.TieringPolicy) != tieringPolicy {
+ return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem tiering policy %v is different than the requested tiering policy %v",
+ sourceFilesystemResp.FileContent.TieringPolicy, tieringPolicy))
+ }
+
+ //Validate the hostIOSize parameter
+ if sourceFilesystemResp.FileContent.HostIOSize != hostIoSize {
+ return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem host IO size %v is different than the requested host IO size %v",
+ sourceFilesystemResp.FileContent.HostIOSize, hostIoSize))
+ }
+
+ return nil
+}
diff --git a/test/integration-test/features/integration.feature b/test/integration-test/features/integration.feature
new file mode 100644
index 00000000..cbe2319d
--- /dev/null
+++ b/test/integration-test/features/integration.feature
@@ -0,0 +1,211 @@
+Feature: CSI interface
+ As a consumer of the CSI interface
+ I want to run a system test
+ So that I know the service functions correctly.
+
+ Scenario: Controller get capabilities, create, validate capabilities and delete basic volume
+ Given a CSI service
+ When I call Controller Get Capabilities
+ Then there are no errors
+ And a basic block volume request name "gditest-vol1" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ Then there are no errors
+ When I call validate volume capabilities with protocol "FC" with same access mode
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create, validate capabilities and delete basic volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol2" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ Then there are no errors
+ When I call validate volume capabilities with protocol "FC" with different access mode
+ Then the error message should contain "Unsupported capability"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create, expand and delete basic volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol3" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "3"
+ Then there are no errors
+ And a basic block volume request name "gditest-vol3" arrayId "Array1-Id" protocol "FC" size "3"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Controller expand volume with smaller new size
+ Given a CSI service
+ And a basic block volume request name "gditest-vol4" arrayId "Array1-Id" protocol "FC" size "3"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "2"
+ Then the error message should contain "requested new capacity smaller than existing capacity"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Idempotent create and delete basic volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol5" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And I call CreateVolume
+ And when I call DeleteVolume
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create a volume from snapshot of thin volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol6" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_volforsnap"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a basic block volume request with volume content source with name "gditest-vol7" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Create, publish, unpublish, and delete basic volume with idempotency check for publish and unpublish
+ Given a CSI service
+ And a basic block volume request name "gditest-vol8" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create and delete basic 264000G volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol9" arrayId "Array1-Id" protocol "FC" size "264000"
+ When I call CreateVolume
+ Then the error message should contain "The system could not create the LUNs because specified size is too big."
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create and delete basic 96G volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol10" arrayId "Array1-Id" protocol "FC" size "96"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create volume, create snapshot, delete snapshot and delete volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol11" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_integration1"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a delete snapshot request
+ And I call DeleteSnapshot
+ And there are no errors
+ And when I call DeleteVolume
+ And there are no errors
+
+ Scenario: Create volume, idempotent create snapshot, idempotent delete snapshot delete volume
+ Given a CSI service
+ And a basic block volume request name "gditest-vol12" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_integration1"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a create snapshot request "snap_integration1"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a delete snapshot request
+ And I call DeleteSnapshot
+ And there are no errors
+ Given a delete snapshot request
+ And I call DeleteSnapshot
+ And there are no errors
+ And when I call DeleteVolume
+ And there are no errors
+
+ Scenario: Node stage, publish, unpublish and unstage volume with idempotency
+ Given a CSI service
+ And a basic block volume request name "gditest-vol13" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnPublishVolume
+ Then there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node stage, publish, unpublish and unstage volume for iSCSI
+ Given a CSI service
+ And a basic block volume request name "gditest-vol14" arrayId "Array1-Id" protocol "iSCSI" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node stage, publish, unpublish and unstage volume for NFS
+ Given a CSI service
+ And a basic block volume request name "gditest-vol15" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType ""
+ And there are no errors
+ And when I call NodePublishVolume fsType "" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
\ No newline at end of file
diff --git a/test/integration-test/integration_main_test.go b/test/integration-test/integration_main_test.go
index 73adf984..f4a012f2 100644
--- a/test/integration-test/integration_main_test.go
+++ b/test/integration-test/integration_main_test.go
@@ -2,8 +2,11 @@ package integration_test
import (
"context"
+ "encoding/json"
"fmt"
+ "io/ioutil"
"os"
+ "strconv"
"testing"
"time"
@@ -17,9 +20,33 @@ import (
var grpcClient *grpc.ClientConn
+//Types used to parse the secret JSON file
+type StorageArrayList struct {
+ StorageArrayList []StorageArrayConfig `json:"storageArrayList"`
+}
+
+type StorageArrayConfig struct {
+ ArrayId string `json:"arrayId"`
+}
+
func TestMain(m *testing.M) {
var stop func()
os.Setenv("X_CSI_MODE", "")
+
+ file, err := ioutil.ReadFile(os.Getenv("DRIVER_CONFIG"))
+ if err != nil {
+ panic("Driver Config missing")
+ }
+ arrayIdList := StorageArrayList{}
+ _ = json.Unmarshal(file, &arrayIdList)
+ if len(arrayIdList.StorageArrayList) == 0 {
+ panic("Array Info not provided")
+ }
+ for i := 0; i < len(arrayIdList.StorageArrayList); i++ {
+ arrayIdvar := "Array" + strconv.Itoa(i+1) + "-Id"
+ os.Setenv(arrayIdvar, arrayIdList.StorageArrayList[i].ArrayId)
+ }
+
ctx := context.Background()
fmt.Printf("calling startServer")
grpcClient, stop = startServer(ctx)
diff --git a/test/integration-test/integration_test.go b/test/integration-test/integration_test.go
index edc2406d..152aefb4 100644
--- a/test/integration-test/integration_test.go
+++ b/test/integration-test/integration_test.go
@@ -88,8 +88,9 @@ func (f *feature) aBasicBlockVolumeRequest(volumeName, arrayId, protocol string,
params["isDataReductionEnabled"] = "false"
params["tieringPolicy"] = "0"
params["description"] = "CSI Volume Unit Test"
- params["arrayId"] = arrayId
+ params["arrayId"] = os.Getenv(arrayId)
params["protocol"] = protocol
+ params["nasServer"] = os.Getenv("NAS_SERVER")
req.Parameters = params
req.Name = volumeName
capacityRange := new(csi.CapacityRange)
@@ -117,7 +118,7 @@ func (f *feature) aBasicBlockVolumeRequestWithVolumeContentSource(volumeName, ar
req := new(csi.CreateVolumeRequest)
params := make(map[string]string)
params["storagePool"] = os.Getenv("STORAGE_POOL")
- params["arrayId"] = arrayId
+ params["arrayId"] = os.Getenv(arrayId)
params["protocol"] = protocol
req.Parameters = params
req.Name = volumeName
diff --git a/test/unit-test/features/unit.feature b/test/unit-test/features/unit.feature
new file mode 100644
index 00000000..01ab76ea
--- /dev/null
+++ b/test/unit-test/features/unit.feature
@@ -0,0 +1,750 @@
+Feature: CSI interface
+ As a consumer of the CSI interface
+ I want to run a system test
+ So that I know the service functions correctly.
+
+ Scenario: Add node info to array
+ Given a CSI service with node
+ Given a CSI service with node
+ Then there are no errors
+
+ Scenario: Create and Delete snapshot successfully
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol1" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ Given a create snapshot request "csi_snapshot_test"
+ When I call CreateSnapshot
+ Then there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create snapshot with a name that already exists
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol2" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ And a create snapshot request "snap1"
+ When I call CreateSnapshot
+ Then there are no errors
+ When I call CreateSnapshot
+ Then there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create snapshot without giving storage resource name
+ Given a CSI service
+ And a create snapshot request "snapshot_test1"
+ When I call CreateSnapshot
+ Then the error message should contain "Storage Resource ID cannot be empty"
+
+ Scenario: Create snapshot with invalid name
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol3" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ And a create snapshot request "snap_#$"
+ When I call CreateSnapshot
+ Then the error message should contain "invalid snapshot name"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Delete snapshot without giving ID
+ Given a CSI service
+ And a delete snapshot request
+ When I call DeleteSnapshot
+ Then the error message should contain "snapshotId can't be empty"
+
+ Scenario: Delete snapshot with incorrect ID
+ Given a CSI service
+ And a delete snapshot request "snap_not_exist_id"
+ When I call DeleteSnapshot
+ Then there are no errors
+
+ Scenario: Create and delete basic volume successfully and idempotency test
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol4" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create and delete basic volume with default protocol successfully
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol39" arrayId "Array1-Id" protocol "" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create volume with 0 size
+ Given a CSI service
+ And a basic block volume request name "gdtest-zerosizevol" arrayId "Array1-Id" protocol "" size "0"
+ When I call CreateVolume
+ Then the error message should contain "RequiredBytes should be greater then 0"
+
+ Scenario: Create and delete existing filesystem with same and different size
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol40" arrayId "Array1-Id" protocol "NFS" size "3"
+ When I call CreateVolume
+ Then there are no errors
+ Given a basic block volume request name "gdtest-vol40" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ Then the error message should contain "already exists"
+ And a basic block volume request name "gdtest-vol40" arrayId "Array1-Id" protocol "NFS" size "3"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create an existing volume with different and same size
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol6" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ And a basic block volume request name "gdtest-vol6" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ Then the error message should contain "'Volume name' already exists and size is different"
+ And a basic block volume request name "gdtest-vol6" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create a volume without thinProvisioned parameter
+ Given a CSI service
+ And a basic block volume request with volumeName "gdtest-vol7" arrayId "Array1-Id" protocol "FC" size "2" storagepool "pool_1" thinProvisioned "" isDataReductionEnabled "false" tieringPolicy "0"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create a volume without isCompressionEnabled parameter
+ Given a CSI service
+ And a basic block volume request with volumeName "gdtest-vol8" arrayId "Array1-Id" protocol "FC" size "2" storagepool "pool_1" thinProvisioned "true" isDataReductionEnabled "false" tieringPolicy "0"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create a volume without isDataReductionEnabled parameter
+ Given a CSI service
+ And a basic block volume request with volumeName "gdtest-vol9" arrayId "Array1-Id" protocol "FC" size "2" storagepool "pool_1" thinProvisioned "true" isDataReductionEnabled "" tieringPolicy "0"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create a volume without tieringPolicy parameter
+ Given a CSI service
+ And a basic block volume request with volumeName "gdtest-vol10" arrayId "Array1-Id" protocol "FC" size "2" storagepool "pool_1" thinProvisioned "true" isDataReductionEnabled "false" tieringPolicy ""
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Create a volume with incorrect storage pool
+ Given a CSI service
+ And a basic block volume request with volumeName "gdtest-vol11" arrayId "Array1-Id" protocol "FC" size "2" storagepool "abcd" thinProvisioned "true" isDataReductionEnabled "false" tieringPolicy "0"
+ When I call CreateVolume
+ Then the error message should contain "Unable to get PoolID"
+
+ Scenario: Create a volume without volume name
+ Given a CSI service
+ And a basic block volume request with volumeName "" arrayId "Array1-Id" protocol "FC" size "2" storagepool "abcd" thinProvisioned "true" isDataReductionEnabled "false" tieringPolicy "0"
+ When I call CreateVolume
+ Then the error message should contain "required: Name"
+
+ Scenario: Create a volume from snapshot of thin volume with idempotency
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol12" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_volforsnap"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a basic block volume request with volume content source as snapshot with name "gdtest-vol13" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Create a volume from snapshot of filesystem and create snapshot of cloned filesystem
+ Given a CSI service
+ And a basic block volume request name "gdtest-fssource" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_fs-gdtest-fssource"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a basic block volume request with volume content source as snapshot with name "gdtest-fsclone" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ And there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ Given a create snapshot request "snap_fs-gdtest-fssource-1"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Create a volume from snapshot for NFS protocol with incompatible size
+ Given a CSI service
+ And a basic block volume request name "gdtest-fssource-1" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_fs-gdtest-fssource-2"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a basic block volume request with volume content source as snapshot with name "gdtest-fsclone" arrayId "Array1-Id" protocol "NFS" size "8"
+ When I call CreateVolume
+ Then the error message should contain "size"
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Create a volume from snapshot that does not exist
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol14" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_volforsnap"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ Given a basic block volume request with volume content source as snapshot with name "gdtest-vol15" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then the error message should contain "snapshot not found"
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Create a volume from snapshot passing an existing name for the new volume
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol16" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_volforsnap"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a basic block volume request with volume content source as snapshot with name "gdtest-vol16" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then the error message should contain "already exists"
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Clone a volume successfully with idempotency
+ Given a CSI service
+ And a basic block volume request name "gdtest-sourcevol1" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ And there are no errors
+ Given a basic block volume request with volume content source as volume with name "gdtest-clonevol" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ Given a basic block volume request name "gdtest-sourcevol1" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ And there are no errors
+ Given a basic block volume request with volume content source as volume with name "gdtest-clonevol" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Publish and unpublish a volume to host
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol17" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Publish a volume to host with readonly as true
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol18" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume with host "host" readonly "true"
+ Then the error message should contain "Readonly must be false"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Publish and unpublish a volume to host without giving hostname
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol19" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume with host "" readonly "false"
+ Then the error message should contain "required: NodeID"
+ And when I call UnpublishVolume
+ Then the error message should contain "Node ID is required"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Publish a volume to host with VolumeCapability_AccessMode other than SINGLE_NODE_WRITER
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol20" arrayId "Array1-Id" protocol "FC" size "5"
+ When I change volume capability accessmode
+ When I call CreateVolume
+ Then the error message should contain "not supported"
+ And when I call PublishVolume
+ Then the error message should contain "Access mode MULTI_NODE_SINGLE_WRITER is not supported"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Publish and unpublish a volume to host with incorrect hostname
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol21" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume with host "host" readonly "false"
+ Then the error message should contain "unable to find host"
+ And when I call UnpublishVolume
+ Then the error message should contain "unable to find host"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Publish and unpublish a volume to host without giving volume id
+ Given a CSI service
+ And when I call PublishVolume with volumeId ""
+ Then the error message should contain "required: VolumeID"
+ When I call UnpublishVolume with volumeId ""
+ Then the error message should contain "required: VolumeID"
+
+ Scenario: Publish and unpublish a volume to host with deleted volume
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol41" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+ And when I call PublishVolume
+ Then the error message should contain "Find volume Failed"
+ And when I call UnpublishVolume
+ Then there are no errors
+
+ Scenario: Publish and unpublish a volume to host with deleted filesystem
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol42" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+ And when I call PublishVolume
+ Then the error message should contain "failed"
+ And when I call UnpublishVolume
+ Then there are no errors
+
+ Scenario: Publish and unpublish volume idempotency
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol22" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call PublishVolume
+ Then there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Validate volume capabilities with same access mode
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol24" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call validate volume capabilities with protocol "FC" with same access mode
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Validate volume capabilities with different access mode
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol25" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call validate volume capabilities with protocol "FC" with different access mode
+ Then the error message should contain "Unsupported capability"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Validate volume capabilities with incorrect volume Id
+ Given a CSI service
+ When I call validate volume capabilities with protocol "FC" with volume ID "xyz"
+ Then the error message should contain "Volume not found"
+
+ Scenario: Controller get capabilities
+ Given a CSI service
+ When I call Controller Get Capabilities
+ Then there are no errors
+
+ Scenario: Controller expand volume
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol26" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "3"
+ Then there are no errors
+ And a basic block volume request name "gdtest-vol26" arrayId "Array1-Id" protocol "FC" size "3"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Controller expand volume for NFS protocol
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol26" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "8"
+ Then there are no errors
+ And a basic block volume request name "gdtest-vol26" arrayId "Array1-Id" protocol "NFS" size "8"
+ When I call CreateVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Controller expand volume for cloned NFS volume
+ Given a CSI service
+ And a basic block volume request name "gdtest-snapofclonevol" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ And there are no errors
+ Given a create snapshot request "snap_volforsnap_1"
+ When I call CreateSnapshot
+ And there are no errors
+ Given a basic block volume request with volume content source as snapshot with name "gdtest-vol13_1" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "8"
+ Then the error message should contain "snapshot"
+ And when I call DeleteVolume
+ Then there are no errors
+ Given a delete snapshot request
+ When I call DeleteSnapshot
+ Then there are no errors
+ And When I call DeleteAllCreatedVolumes
+ Then there are no errors
+
+ Scenario: Controller expand volume with same new size
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol27" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "2"
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Controller expand volume with smaller new size
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol28" arrayId "Array1-Id" protocol "FC" size "3"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "2"
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Controller expand volume with new size as 0
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol29" arrayId "Array1-Id" protocol "FC" size "2"
+ When I call CreateVolume
+ Then there are no errors
+ When I call Controller Expand Volume "0"
+ Then the error message should contain "Required bytes can not be 0 or less"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Controller expand volume with volume that does not exist
+ Given a CSI service
+ When I call Controller Expand Volume "2" with volume "abcd"
+ Then the error message should contain "Unable to find volume"
+
+ Scenario: Controller expand volume without giving volume
+ Given a CSI service
+ When I call Controller Expand Volume "2" with volume ""
+ Then the error message should contain "required"
+
+ Scenario: Node stage, publish, unpublish and unstage volume
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol30" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node publish volume with readonly as true
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol31" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "true"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node publish volume without volume id
+ Given a CSI service
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then the error message should contain "required: VolumeID"
+
+ Scenario: Node publish volume without controller publish volume
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol33" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then the error message should contain "no such file or directory"
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node unpublish when node publish never happened
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol34" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then the error message should contain "no such file or directory"
+ And when I call NodeUnPublishVolume
+ Then there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node Get Capabilities
+ Given a CSI service
+ And When I call NodeGetCapabilities
+ Then there are no errors
+
+ Scenario: Node publish volume without access mode
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol35" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodePublishVolume without accessmode and fsType "ext4"
+ Then the error message should contain "required: AccessMode"
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node stage, publish, unpublish and unstage volume idempotency
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol36" arrayId "Array1-Id" protocol "FC" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnPublishVolume
+ Then there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Get Plugin capabilities
+ Given a CSI service
+ And When I call GetPluginCapabilities
+ Then there are no errors
+
+ Scenario: Get Plugin info
+ Given a CSI service
+ And When I call GetPluginInfo
+ Then there are no errors
+
+ Scenario: NodeGetInfo
+ Given a CSI service
+ And When I call NodeGetInfo
+ Then the error message should contain "not added"
+
+ Scenario: Node stage, publish, unpublish and unstage volume for iSCSI
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol37" arrayId "Array1-Id" protocol "iSCSI" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType "ext4"
+ And there are no errors
+ And when I call NodePublishVolume fsType "ext4" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+
+ Scenario: Node stage, publish, unpublish and unstage volume for NFS with idempotency
+ Given a CSI service
+ And a basic block volume request name "gdtest-vol38" arrayId "Array1-Id" protocol "NFS" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType ""
+ And there are no errors
+ And when I call NodeStageVolume fsType ""
+ And there are no errors
+ And when I call NodePublishVolume fsType "" readonly "false"
+ Then there are no errors
+ And when I call NodePublishVolume fsType "" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node stage, publish, unpublish and unstage volume for NFS with accessmode "ROX"
+ Given a CSI service
+ And a basic filesystem request name "gdtest-vol43" arrayId "Array1-Id" protocol "NFS" accessMode "ROX" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType ""
+ And there are no errors
+ And when I call NodeStageVolume fsType ""
+ And there are no errors
+ And when I call NodePublishVolume fsType "" readonly "false"
+ Then there are no errors
+ And when I call NodePublishVolume fsType "" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+
+ Scenario: Node stage, publish, unpublish and unstage volume for NFS with accessmode "RWX"
+ Given a CSI service
+ And a basic filesystem request name "gdtest-vol44" arrayId "Array1-Id" protocol "NFS" accessMode "RWX" size "5"
+ When I call CreateVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call PublishVolume
+ And there are no errors
+ And when I call NodeStageVolume fsType ""
+ And there are no errors
+ And when I call NodeStageVolume fsType ""
+ And there are no errors
+ And when I call NodePublishVolume fsType "" readonly "false"
+ Then there are no errors
+ And when I call NodePublishVolume fsType "" readonly "false"
+ Then there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnPublishVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
+ And when I call DeleteVolume
+ Then there are no errors
+ And when I call NodeUnstageVolume
+ And there are no errors
+ And when I call UnpublishVolume
+ And there are no errors
\ No newline at end of file
diff --git a/test/unit-test/unit_main_test.go b/test/unit-test/unit_main_test.go
index 71bb1e5f..c5be6c46 100644
--- a/test/unit-test/unit_main_test.go
+++ b/test/unit-test/unit_main_test.go
@@ -2,23 +2,51 @@ package unit_test
import (
"context"
+ "encoding/json"
"fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
"github.com/DATA-DOG/godog"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/dell/csi-unity/provider"
"github.com/dell/csi-unity/service"
"github.com/rexray/gocsi/utils"
"google.golang.org/grpc"
- "os"
- "testing"
- "time"
)
var grpcClient *grpc.ClientConn
var stop func()
+// StorageArrayList and StorageArrayConfig model the secret JSON file referenced by DRIVER_CONFIG.
+type StorageArrayList struct {
+ StorageArrayList []StorageArrayConfig `json:"storageArrayList"`
+}
+
+type StorageArrayConfig struct {
+ ArrayId string `json:"arrayId"`
+}
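+
+// A minimal sketch of the file referenced by DRIVER_CONFIG, assuming only
+// arrayId is consumed here (the array IDs below are placeholders). Each
+// entry is exported as "ArrayN-Id", which the feature files pass to the
+// step definitions and the steps resolve via os.Getenv:
+//
+// {
+//   "storageArrayList": [
+//     { "arrayId": "APM00000000001" },
+//     { "arrayId": "APM00000000002" }
+//   ]
+// }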
+
func TestMain(m *testing.M) {
os.Setenv("X_CSI_MODE", "")
+
+ file, err := ioutil.ReadFile(os.Getenv("DRIVER_CONFIG"))
+ if err != nil {
+ panic("Driver Config missing")
+ }
+ arrayIdList := StorageArrayList{}
+ if err := json.Unmarshal(file, &arrayIdList); err != nil {
+ panic("Driver Config is not valid JSON: " + err.Error())
+ }
+ if len(arrayIdList.StorageArrayList) == 0 {
+ panic("Array Info not provided")
+ }
+ for i, array := range arrayIdList.StorageArrayList {
+ os.Setenv("Array"+strconv.Itoa(i+1)+"-Id", array.ArrayId)
+ }
+
ctx := context.Background()
fmt.Printf("calling startServer")
grpcClient, stop = startServer(ctx)
diff --git a/test/unit-test/unit_test.go b/test/unit-test/unit_test.go
index 24035c68..c45955df 100644
--- a/test/unit-test/unit_test.go
+++ b/test/unit-test/unit_test.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"os"
+ "path"
"strconv"
"strings"
"time"
@@ -79,6 +80,35 @@ func (f *feature) aCSIService() error {
return nil
}
+// aCSIServiceWithNode restarts the service in node mode so the node and its initiators get registered on the array, then restores the default mode.
+func (f *feature) aCSIServiceWithNode() error {
+ stop()
+ time.Sleep(10 * time.Second)
+ os.Setenv("X_CSI_MODE", "node")
+ ctx := context.Background()
+ grpcClient, stop = startServer(ctx)
+ time.Sleep(5 * time.Second)
+
+ ctx = context.Background()
+ fmt.Printf("testing Identity Probe\n")
+ client := csi.NewIdentityClient(grpcClient)
+ probeResp, err := client.Probe(ctx, &csi.ProbeRequest{})
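+ // Wait for the array to finish registering the node's host entry and initiators; two minutes is a deliberately conservative pause.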
+ time.Sleep(120 * time.Second)
+ if err != nil {
+ fmt.Printf("Probe failed with error: %s:\n", err.Error())
+ } else {
+ fmt.Printf("Probe passed: %s\n", probeResp.Ready)
+ }
+
+ stop()
+ time.Sleep(10 * time.Second)
+ os.Setenv("X_CSI_MODE", "")
+ ctx = context.Background()
+ grpcClient, stop = startServer(ctx)
+ time.Sleep(5 * time.Second)
+ return nil
+}
+
//aBasicBlockVolumeRequest method is used to build a Create volume request
func (f *feature) aBasicBlockVolumeRequest(volumeName, arrayId, protocol string, size int) error {
f.createVolumeRequest = nil
@@ -89,8 +119,9 @@ func (f *feature) aBasicBlockVolumeRequest(volumeName, arrayId, protocol string,
params["isDataReductionEnabled"] = "false"
params["tieringPolicy"] = "0"
params["description"] = "CSI Volume Unit Test"
- params["arrayId"] = arrayId
+ params["arrayId"] = os.Getenv(arrayId)
params["protocol"] = protocol
+ params["nasServer"] = os.Getenv("NAS_SERVER")
req.Parameters = params
req.Name = volumeName
capacityRange := new(csi.CapacityRange)
@@ -112,6 +143,48 @@ func (f *feature) aBasicBlockVolumeRequest(volumeName, arrayId, protocol string,
return nil
}
+// aBasicFilesystemRequest builds a CreateVolume request for a filesystem with the given access mode.
+func (f *feature) aBasicFilesystemRequest(volumeName, arrayId, protocol, am string, size int) error {
+ f.createVolumeRequest = nil
+ req := new(csi.CreateVolumeRequest)
+ params := make(map[string]string)
+ params["storagePool"] = os.Getenv("STORAGE_POOL")
+ params["thinProvisioned"] = "true"
+ params["isDataReductionEnabled"] = "false"
+ params["tieringPolicy"] = "0"
+ params["description"] = "CSI Volume Unit Test"
+ params["arrayId"] = os.Getenv(arrayId)
+ params["protocol"] = protocol
+ params["nasServer"] = os.Getenv("NAS_SERVER")
+ req.Parameters = params
+ req.Name = volumeName
+ capacityRange := new(csi.CapacityRange)
+ capacityRange.RequiredBytes = int64(size * 1024 * 1024 * 1024)
+ req.CapacityRange = capacityRange
+ capability := new(csi.VolumeCapability)
+ mount := new(csi.VolumeCapability_MountVolume)
+ mountType := new(csi.VolumeCapability_Mount)
+ mountType.Mount = mount
+ capability.AccessType = mountType
+ accessMode := new(csi.VolumeCapability_AccessMode)
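+ // Map the shorthand access modes used by the feature files to CSI access modes.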
+ if am == "ROX" {
+ accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
+ } else if am == "RWX" {
+ accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
+ } else if am == "RWO" {
+ accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+ } else {
+ accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN
+ }
+ capability.AccessMode = accessMode
+ f.capability = capability
+ capabilities := make([]*csi.VolumeCapability, 0)
+ capabilities = append(capabilities, capability)
+ req.VolumeCapabilities = capabilities
+ f.createVolumeRequest = req
+ return nil
+}
+
//aBasicBlockVolumeRequestWithParameters method is used to build a Create volume request with parameters
func (f *feature) aBasicBlockVolumeRequestWithParameters(volumeName, arrayId, protocol string, size int, storagepool, thinProvisioned, isDataReductionEnabled, tieringPolicy string) error {
f.createVolumeRequest = nil
@@ -119,8 +192,6 @@ func (f *feature) aBasicBlockVolumeRequestWithParameters(volumeName, arrayId, pr
params := make(map[string]string)
if storagepool == "id" {
params["storagePool"] = os.Getenv("STORAGE_POOL")
- } else if storagepool == "name" {
- params["storagePool"] = os.Getenv("STORAGE_POOL_NAME")
} else {
params["storagePool"] = storagepool
}
@@ -128,8 +199,9 @@ func (f *feature) aBasicBlockVolumeRequestWithParameters(volumeName, arrayId, pr
params["isDataReductionEnabled"] = isDataReductionEnabled
params["tieringPolicy"] = tieringPolicy
params["description"] = "CSI Volume Unit Test"
- params["arrayId"] = arrayId
+ params["arrayId"] = os.Getenv(arrayId)
params["protocol"] = protocol
+ params["nasServer"] = os.Getenv("NAS_SERVER")
req.Parameters = params
req.Name = volumeName
capacityRange := new(csi.CapacityRange)
@@ -157,7 +229,7 @@ func (f *feature) aBasicBlockVolumeRequestWithVolumeContentSource(volumeName, ar
req := new(csi.CreateVolumeRequest)
params := make(map[string]string)
params["storagePool"] = os.Getenv("STORAGE_POOL")
- params["arrayId"] = arrayId
+ params["arrayId"] = os.Getenv(arrayId)
params["protocol"] = protocol
req.Parameters = params
req.Name = volumeName
@@ -187,6 +259,42 @@ func (f *feature) aBasicBlockVolumeRequestWithVolumeContentSource(volumeName, ar
return nil
}
+// aBasicBlockVolumeRequestWithVolumeContentSourceAsVolume builds a CreateVolume request that clones an existing volume.
+func (f *feature) aBasicBlockVolumeRequestWithVolumeContentSourceAsVolume(volumeName, arrayId, protocol string, size int) error {
+ f.createVolumeRequest = nil
+ req := new(csi.CreateVolumeRequest)
+ params := make(map[string]string)
+ params["storagePool"] = os.Getenv("STORAGE_POOL")
+ params["arrayId"] = os.Getenv(arrayId)
+ params["protocol"] = protocol
+ req.Parameters = params
+ req.Name = volumeName
+ capacityRange := new(csi.CapacityRange)
+ capacityRange.RequiredBytes = int64(size * 1024 * 1024 * 1024)
+ req.CapacityRange = capacityRange
+ capability := new(csi.VolumeCapability)
+ mount := new(csi.VolumeCapability_MountVolume)
+ mountType := new(csi.VolumeCapability_Mount)
+ mountType.Mount = mount
+ capability.AccessType = mountType
+ accessMode := new(csi.VolumeCapability_AccessMode)
+ accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+ capability.AccessMode = accessMode
+ f.capability = capability
+ capabilities := make([]*csi.VolumeCapability, 0)
+ capabilities = append(capabilities, capability)
+ req.VolumeCapabilities = capabilities
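+ // Use the volume created by the preceding CreateVolume call as the clone source.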
+ volumeContentSource_VolumeSource := new(csi.VolumeContentSource_VolumeSource)
+ volumeContentSource_VolumeSource.VolumeId = f.createVolumeResponse.GetVolume().GetVolumeId()
+ volumeContentSource_Volume := new(csi.VolumeContentSource_Volume)
+ volumeContentSource_Volume.Volume = volumeContentSource_VolumeSource
+ volumeContentSource := new(csi.VolumeContentSource)
+ volumeContentSource.Type = volumeContentSource_Volume
+ req.VolumeContentSource = volumeContentSource
+ f.createVolumeRequest = req
+ return nil
+}
+
//iChangeVolumeCapabilityAccessmode is a method to change volume capabilities access mode
func (f *feature) iChangeVolumeCapabilityAccessmode() error {
f.createVolumeRequest.VolumeCapabilities[0].AccessMode.Mode = 4
@@ -631,8 +739,8 @@ func (f *feature) whenICallNodePublishVolume(fsType, readonly string) error {
} else {
req.VolumeId = ""
}
- req.StagingTargetPath = os.Getenv("X_CSI_STAGING_TARGET_PATH")
- req.TargetPath = os.Getenv("X_CSI_PUBLISH_TARGET_PATH")
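+ // Suffix the volume ID so each volume gets its own staging and target path.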
+ req.StagingTargetPath = path.Join(os.Getenv("X_CSI_STAGING_TARGET_PATH"), f.volID)
+ req.TargetPath = path.Join(os.Getenv("X_CSI_PUBLISH_TARGET_PATH"), f.volID)
capability := new(csi.VolumeCapability)
mount := new(csi.VolumeCapability_MountVolume)
mount.FsType = fsType
@@ -640,7 +748,7 @@ func (f *feature) whenICallNodePublishVolume(fsType, readonly string) error {
mountType.Mount = mount
capability.AccessType = mountType
accessMode := new(csi.VolumeCapability_AccessMode)
- accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+ accessMode.Mode = f.capability.AccessMode.Mode
capability.AccessMode = accessMode
req.VolumeCapability = capability
read, _ := strconv.ParseBool(readonly)
@@ -668,7 +776,7 @@ func (f *feature) whenICallNodePublishVolumeWithTargetPath(target_path, fsType s
} else {
req.VolumeId = ""
}
- req.StagingTargetPath = os.Getenv("X_CSI_STAGING_TARGET_PATH")
+ req.StagingTargetPath = path.Join(os.Getenv("X_CSI_STAGING_TARGET_PATH"), f.volID)
req.TargetPath = target_path
capability := new(csi.VolumeCapability)
mount := new(csi.VolumeCapability_MountVolume)
@@ -677,7 +785,7 @@ func (f *feature) whenICallNodePublishVolumeWithTargetPath(target_path, fsType s
mountType.Mount = mount
capability.AccessType = mountType
accessMode := new(csi.VolumeCapability_AccessMode)
- accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+ accessMode.Mode = f.capability.AccessMode.Mode
capability.AccessMode = accessMode
req.VolumeCapability = capability
req.Readonly = false
@@ -704,7 +812,7 @@ func (f *feature) whenICallNodePublishVolumeWithoutAccessmode(fsType string) err
} else {
req.VolumeId = ""
}
- req.StagingTargetPath = os.Getenv("X_CSI_STAGING_TARGET_PATH")
+ req.StagingTargetPath = path.Join(os.Getenv("X_CSI_STAGING_TARGET_PATH"), f.volID)
capability := new(csi.VolumeCapability)
mount := new(csi.VolumeCapability_MountVolume)
mount.FsType = fsType
@@ -713,7 +821,7 @@ func (f *feature) whenICallNodePublishVolumeWithoutAccessmode(fsType string) err
capability.AccessType = mountType
capability.AccessMode = nil
req.VolumeCapability = capability
- req.TargetPath = os.Getenv("X_CSI_PUBLISH_TARGET_PATH")
+ req.TargetPath = path.Join(os.Getenv("X_CSI_PUBLISH_TARGET_PATH"), f.volID)
req.Readonly = false
f.nodePublishVolumeRequest = req
@@ -765,7 +873,7 @@ func (f *feature) whenICallNodeStageVolume(fsType string) error {
if f.createVolumeResponse == nil {
req.VolumeId = "NoID"
}
- req.StagingTargetPath = os.Getenv("X_CSI_STAGING_TARGET_PATH")
+ req.StagingTargetPath = path.Join(os.Getenv("X_CSI_STAGING_TARGET_PATH"), f.volID)
capability := new(csi.VolumeCapability)
mount := new(csi.VolumeCapability_MountVolume)
mount.FsType = fsType
@@ -773,7 +881,7 @@ func (f *feature) whenICallNodeStageVolume(fsType string) error {
mountType.Mount = mount
capability.AccessType = mountType
accessMode := new(csi.VolumeCapability_AccessMode)
- accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+ accessMode.Mode = f.capability.AccessMode.Mode
capability.AccessMode = accessMode
req.VolumeCapability = capability
f.nodeStageVolumeRequest = req
@@ -806,7 +914,7 @@ func (f *feature) whenICallNodeStageVolumeWithTargetPath(fsType, target_path str
mountType.Mount = mount
capability.AccessType = mountType
accessMode := new(csi.VolumeCapability_AccessMode)
- accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+ accessMode.Mode = f.capability.AccessMode.Mode
capability.AccessMode = accessMode
req.VolumeCapability = capability
f.nodeStageVolumeRequest = req
@@ -915,110 +1023,16 @@ func (f *feature) whenICallGetPluginInfo() error {
return nil
}
-//aCSIServiceWithoutCSIUnityEndpoint - Test case to call probe without CSI Unity endpoint
-func (f *feature) aCSIServiceWithoutCSIUnityEndpoint() error {
- stop()
- time.Sleep(10 * time.Second)
- os.Setenv("X_CSI_UNITY_ENDPOINT", "")
- os.Setenv("X_CSI_MODE", "controller")
- ctx := context.Background()
- grpcClient, stop = startServer(ctx)
- time.Sleep(5 * time.Second)
- client := csi.NewIdentityClient(grpcClient)
- probeResp, err := client.Probe(ctx, &csi.ProbeRequest{})
- if err != nil {
- fmt.Printf("Controller Probe failed with error: %s:\n", err.Error())
- f.addError(err)
- } else {
- fmt.Printf("Controller Probe passed: %s\n", probeResp.Ready)
- }
- stop()
- time.Sleep(10 * time.Second)
- os.Setenv("X_CSI_MODE", "node")
- ctx = context.Background()
- grpcClient, stop = startServer(ctx)
- time.Sleep(10 * time.Second)
- client = csi.NewIdentityClient(grpcClient)
- probeResp, err = client.Probe(ctx, &csi.ProbeRequest{})
- if err != nil {
- fmt.Printf("Node Probe failed with error: %s:\n", err.Error())
- f.addError(err)
- } else {
- fmt.Printf("Node Probe passed: %s\n", probeResp.Ready)
- }
-
- os.Setenv("X_CSI_UNITY_ENDPOINT", "https://1.1.1.1")
- os.Setenv("X_CSI_MODE", "")
- return nil
-}
-
-//aCSIServiceWithCSIUnityPassword - Test case to call probe with CSI Unity password as parameter
-func (f *feature) aCSIServiceWithCSIUnityPassword(password string) error {
- stop()
- time.Sleep(10 * time.Second)
- os.Setenv("X_CSI_UNITY_PASSWORD", password)
- os.Setenv("X_CSI_MODE", "controller")
- ctx := context.Background()
- grpcClient, stop = startServer(ctx)
- time.Sleep(10 * time.Second)
- client := csi.NewIdentityClient(grpcClient)
- probeResp, err := client.Probe(ctx, &csi.ProbeRequest{})
- if err != nil {
- fmt.Printf("Controller Probe failed with error: %s:\n", err.Error())
- f.addError(err)
- } else {
- fmt.Printf("Controller Probe passed: %s\n", probeResp.Ready)
- }
- stop()
- time.Sleep(10 * time.Second)
- os.Setenv("X_CSI_MODE", "node")
- ctx = context.Background()
- grpcClient, stop = startServer(ctx)
- time.Sleep(10 * time.Second)
- client = csi.NewIdentityClient(grpcClient)
- probeResp, err = client.Probe(ctx, &csi.ProbeRequest{})
- if err != nil {
- fmt.Printf("Node Probe failed with error: %s:\n", err.Error())
- f.addError(err)
- } else {
- fmt.Printf("Node Probe passed: %s\n", probeResp.Ready)
- }
-
- os.Setenv("X_CSI_UNITY_PASSWORD", "Password123!")
- os.Setenv("X_CSI_MODE", "")
- return nil
-}
-
-//whenICallNodeGetInfoHostname - Test case to call node get info with hostname
-func (f *feature) whenICallNodeGetInfoHostname(hostname string) error {
- stop()
- time.Sleep(10 * time.Second)
- os.Setenv("X_CSI_UNITY_NODENAME", hostname)
- ctx := context.Background()
- grpcClient, stop = startServer(ctx)
- time.Sleep(5 * time.Second)
- client := csi.NewNodeClient(grpcClient)
- _, err := client.NodeGetInfo(ctx, &csi.NodeGetInfoRequest{})
- if err != nil {
- fmt.Printf("Node get info failed with error: %s:\n", err.Error())
- f.addError(err)
- } else {
- fmt.Printf("Node get info passed\n")
- }
-
- os.Setenv("X_CSI_UNITY_NODENAME", "lgloc183")
- return nil
-}
-
func FeatureContext(s *godog.Suite) {
f := &feature{}
s.Step(`^a CSI service$`, f.aCSIService)
- s.Step(`^a CSI service without CSI Unity Endpoint$`, f.aCSIServiceWithoutCSIUnityEndpoint)
- s.Step(`^a CSI service with CSI Unity Password "([^"]*)"$`, f.aCSIServiceWithCSIUnityPassword)
+ s.Step(`^a CSI service with node$`, f.aCSIServiceWithNode)
s.Step(`^a basic block volume request name "([^"]*)" arrayId "([^"]*)" protocol "([^"]*)" size "(\d+)"$`, f.aBasicBlockVolumeRequest)
+ s.Step(`^a basic filesystem request name "([^"]*)" arrayId "([^"]*)" protocol "([^"]*)" accessMode "([^"]*)" size "(\d+)"$`, f.aBasicFilesystemRequest)
s.Step(`^I change volume capability accessmode$`, f.iChangeVolumeCapabilityAccessmode)
s.Step(`^a basic block volume request with volumeName "([^"]*)" arrayId "([^"]*)" protocol "([^"]*)" size "([^"]*)" storagepool "([^"]*)" thinProvisioned "([^"]*)" isDataReductionEnabled "([^"]*)" tieringPolicy "([^"]*)"$`, f.aBasicBlockVolumeRequestWithParameters)
- s.Step(`^a basic block volume request with volume content source with name "([^"]*)" arrayId "([^"]*)" protocol "([^"]*)" size "([^"]*)"$`, f.aBasicBlockVolumeRequestWithVolumeContentSource)
+ s.Step(`^a basic block volume request with volume content source as snapshot with name "([^"]*)" arrayId "([^"]*)" protocol "([^"]*)" size "([^"]*)"$`, f.aBasicBlockVolumeRequestWithVolumeContentSource)
+ s.Step(`^a basic block volume request with volume content source as volume with name "([^"]*)" arrayId "([^"]*)" protocol "([^"]*)" size "([^"]*)"$`, f.aBasicBlockVolumeRequestWithVolumeContentSourceAsVolume)
s.Step(`^I call CreateVolume$`, f.iCallCreateVolume)
s.Step(`^when I call DeleteVolume$`, f.whenICallDeleteVolume)
s.Step(`^When I call DeleteAllCreatedVolumes$`, f.whenICallDeleteAllCreatedVolumes)
@@ -1047,7 +1061,6 @@ func FeatureContext(s *godog.Suite) {
s.Step(`^when I call NodeStageVolume fsType "([^"]*)" with StagingTargetPath "([^"]*)"$`, f.whenICallNodeStageVolumeWithTargetPath)
s.Step(`^when I call NodeUnstageVolume$`, f.whenICallNodeUnstageVolume)
s.Step(`^When I call NodeGetInfo$`, f.whenICallNodeGetInfo)
- s.Step(`^When I call NodeGetInfo hostname "([^"]*)"$`, f.whenICallNodeGetInfoHostname)
s.Step(`^When I call NodeGetCapabilities$`, f.whenICallNodeGetCapabilities)
s.Step(`^when I call NodePublishVolume without accessmode and fsType "([^"]*)"$`, f.whenICallNodePublishVolumeWithoutAccessmode)
s.Step(`^When I call GetPluginCapabilities$`, f.whenICallGetPluginCapabilities)