From 7d4f284dbae963143617e7d8b0dc7c6887df6a63 Mon Sep 17 00:00:00 2001
From: Robin Schneider
Date: Wed, 19 Jun 2024 14:30:11 +0200
Subject: [PATCH] Cherrypick SKE changes

---
 Dockerfile                                    | 87 ++++++++++---------
 pkg/csi/cinder/controllerserver.go            | 19 ++++
 pkg/csi/cinder/openstack/openstack.go         |  2 +
 pkg/csi/cinder/openstack/openstack_mock.go    | 15 ++++
 pkg/csi/cinder/openstack/openstack_volumes.go | 45 ++++++++--
 5 files changed, 119 insertions(+), 49 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index fb06487950..0a397bae26 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -103,49 +103,6 @@ LABEL name="barbican-kms-plugin" \
 
 CMD ["sh", "-c", "/bin/barbican-kms-plugin --socketpath ${socketpath} --cloud-config ${cloudconfig}"]
 
-##
-## cinder-csi-plugin
-##
-
-# step 1: copy all necessary files from Debian distro to /dest folder
-# all magic happens in tools/csi-deps.sh
-FROM --platform=${TARGETPLATFORM} ${DEBIAN_IMAGE} as cinder-csi-plugin-utils
-
-RUN clean-install bash rsync mount udev btrfs-progs e2fsprogs xfsprogs util-linux
-COPY tools/csi-deps.sh /tools/csi-deps.sh
-RUN /tools/csi-deps.sh
-
-# step 2: check if all necessary files are copied and work properly
-# the build have to finish without errors, but the result image will not be used
-FROM --platform=${TARGETPLATFORM} ${DISTROLESS_IMAGE} as cinder-csi-plugin-utils-check
-
-COPY --from=cinder-csi-plugin-utils /dest /
-COPY --from=cinder-csi-plugin-utils /bin/sh /bin/sh
-COPY tools/csi-deps-check.sh /tools/csi-deps-check.sh
-
-SHELL ["/bin/sh"]
-RUN /tools/csi-deps-check.sh
-
-# step 3: build tiny cinder-csi-plugin image with only necessary files
-FROM --platform=${TARGETPLATFORM} ${DISTROLESS_IMAGE} as cinder-csi-plugin
-
-# Copying csi-deps-check.sh simply ensures that the resulting image has a dependency
-# on cinder-csi-plugin-utils-check and therefore that the check has passed
-COPY --from=cinder-csi-plugin-utils-check /tools/csi-deps-check.sh /bin/csi-deps-check.sh
-COPY --from=cinder-csi-plugin-utils /dest /
-COPY --from=builder /build/cinder-csi-plugin /bin/cinder-csi-plugin
-COPY --from=certs /etc/ssl/certs /etc/ssl/certs
-
-LABEL name="cinder-csi-plugin" \
-      license="Apache Version 2.0" \
-      maintainers="Kubernetes Authors" \
-      description="Cinder CSI Plugin" \
-      distribution-scope="public" \
-      summary="Cinder CSI Plugin" \
-      help="none"
-
-CMD ["/bin/cinder-csi-plugin"]
-
 ##
 ## k8s-keystone-auth
 ##
@@ -222,3 +179,47 @@ LABEL name="octavia-ingress-controller" \
       help="none"
 
 CMD ["/bin/octavia-ingress-controller"]
+
+## SKE: Concourse only pushes the last built image. Therefore we have to move this to the bottom.
+##
+## cinder-csi-plugin
+##
+
+# step 1: copy all necessary files from Debian distro to /dest folder
+# all magic happens in tools/csi-deps.sh
+FROM --platform=${TARGETPLATFORM} ${DEBIAN_IMAGE} as cinder-csi-plugin-utils
+
+RUN clean-install bash rsync mount udev btrfs-progs e2fsprogs xfsprogs util-linux
+COPY tools/csi-deps.sh /tools/csi-deps.sh
+RUN /tools/csi-deps.sh
+
+# step 2: check if all necessary files are copied and work properly
+# the build has to finish without errors, but the resulting image will not be used
+FROM --platform=${TARGETPLATFORM} ${DISTROLESS_IMAGE} as cinder-csi-plugin-utils-check
+
+COPY --from=cinder-csi-plugin-utils /dest /
+COPY --from=cinder-csi-plugin-utils /bin/sh /bin/sh
+COPY tools/csi-deps-check.sh /tools/csi-deps-check.sh
+
+SHELL ["/bin/sh"]
+RUN /tools/csi-deps-check.sh
+
+# step 3: build tiny cinder-csi-plugin image with only necessary files
+FROM --platform=${TARGETPLATFORM} ${DISTROLESS_IMAGE} as cinder-csi-plugin
+
+# Copying csi-deps-check.sh simply ensures that the resulting image has a dependency
+# on cinder-csi-plugin-utils-check and therefore that the check has passed
+COPY --from=cinder-csi-plugin-utils-check /tools/csi-deps-check.sh /bin/csi-deps-check.sh
+COPY --from=cinder-csi-plugin-utils /dest /
+COPY --from=builder /build/cinder-csi-plugin /bin/cinder-csi-plugin
+COPY --from=certs /etc/ssl/certs /etc/ssl/certs
+
+LABEL name="cinder-csi-plugin" \
+      license="Apache Version 2.0" \
+      maintainers="Kubernetes Authors" \
+      description="Cinder CSI Plugin" \
+      distribution-scope="public" \
+      summary="Cinder CSI Plugin" \
+      help="none"
+
+CMD ["/bin/cinder-csi-plugin"]
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index 110d3e3ec8..d1c84390e3 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -17,7 +17,9 @@ limitations under the License.
 package cinder
 
 import (
+	"fmt"
 	"strconv"
+	"time"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots"
@@ -27,6 +29,7 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/types/known/timestamppb"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack"
 	"k8s.io/cloud-provider-openstack/pkg/util"
 
@@ -92,6 +95,10 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		if volSizeGB != volumes[0].Size {
 			return nil, status.Error(codes.AlreadyExists, "Volume Already exists with same name and different capacity")
 		}
+
+		if volumes[0].Status != openstack.VolumeAvailableStatus {
+			return nil, status.Error(codes.Internal, fmt.Sprintf("Volume %s is not in available state", volumes[0].ID))
+		}
 		klog.V(4).Infof("Volume %s already exists in Availability Zone: %s of size %d GiB", volumes[0].ID, volumes[0].AvailabilityZone, volumes[0].Size)
 		return getCreateVolumeResponse(&volumes[0], ignoreVolumeAZ, req.GetAccessibilityRequirements()), nil
 	} else if len(volumes) > 1 {
@@ -142,6 +149,18 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 
 	}
 
+	targetStatus := []string{openstack.VolumeAvailableStatus}
+	err = cloud.WaitVolumeTargetStatusWithCustomBackoff(vol.ID, targetStatus,
+		&wait.Backoff{
+			Duration: 20 * time.Second,
+			Steps:    5,
+			Factor:   1.28,
+		})
+	if err != nil {
+		klog.Errorf("Failed to wait for volume %s to become available: %v", vol.ID, err)
+		return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume: volume %s did not become available in time: %v", vol.ID, err))
+	}
+
 	klog.V(4).Infof("CreateVolume: Successfully created volume %s in Availability Zone: %s of size %d GiB", vol.ID, vol.AvailabilityZone, vol.Size)
 
 	return getCreateVolumeResponse(vol, ignoreVolumeAZ, req.GetAccessibilityRequirements()), nil
diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go
index e3a76a167f..c82dcc357c 100644
--- a/pkg/csi/cinder/openstack/openstack.go
+++ b/pkg/csi/cinder/openstack/openstack.go
@@ -28,6 +28,7 @@ import (
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
 	"github.com/spf13/pflag"
 	gcfg "gopkg.in/gcfg.v1"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cloud-provider-openstack/pkg/client"
 	"k8s.io/cloud-provider-openstack/pkg/metrics"
 	"k8s.io/cloud-provider-openstack/pkg/util/metadata"
@@ -52,6 +53,7 @@ type IOpenStack interface {
 	DetachVolume(instanceID, volumeID string) error
 	WaitDiskDetached(instanceID string, volumeID string) error
 	WaitVolumeTargetStatus(volumeID string, tStatus []string) error
+	WaitVolumeTargetStatusWithCustomBackoff(volumeID string, tStatus []string, backoff *wait.Backoff) error
 	GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
 	GetVolume(volumeID string) (*volumes.Volume, error)
 	GetVolumesByName(name string) ([]volumes.Volume, error)
diff --git a/pkg/csi/cinder/openstack/openstack_mock.go b/pkg/csi/cinder/openstack/openstack_mock.go
index 5c52cf0fbf..ef5dae489c 100644
--- a/pkg/csi/cinder/openstack/openstack_mock.go
+++ b/pkg/csi/cinder/openstack/openstack_mock.go
@@ -21,6 +21,7 @@ import (
 	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
 	"github.com/stretchr/testify/mock"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cloud-provider-openstack/pkg/util/metadata"
 )
 
@@ -174,6 +175,20 @@ func (_m *OpenStackMock) WaitVolumeTargetStatus(volumeID string, tStatus []strin
 	return r0
 }
 
+// WaitVolumeTargetStatusWithCustomBackoff provides a mock function with given fields: volumeID, tStatus, backoff
+func (_m *OpenStackMock) WaitVolumeTargetStatusWithCustomBackoff(volumeID string, tStatus []string, backoff *wait.Backoff) error {
+	ret := _m.Called(volumeID, tStatus, backoff)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(string, []string, *wait.Backoff) error); ok {
+		r0 = rf(volumeID, tStatus, backoff)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
 // WaitDiskDetached provides a mock function with given fields: instanceID, volumeID
 func (_m *OpenStackMock) WaitDiskDetached(instanceID string, volumeID string) error {
 	ret := _m.Called(instanceID, volumeID)
diff --git a/pkg/csi/cinder/openstack/openstack_volumes.go b/pkg/csi/cinder/openstack/openstack_volumes.go
index 9ef45b455a..e14e92d58f 100644
--- a/pkg/csi/cinder/openstack/openstack_volumes.go
+++ b/pkg/csi/cinder/openstack/openstack_volumes.go
@@ -39,12 +39,12 @@ const (
 	operationFinishInitDelay = 1 * time.Second
 	operationFinishFactor    = 1.1
 	operationFinishSteps     = 10
-	diskAttachInitDelay      = 1 * time.Second
-	diskAttachFactor         = 1.2
-	diskAttachSteps          = 15
-	diskDetachInitDelay      = 1 * time.Second
-	diskDetachFactor         = 1.2
-	diskDetachSteps          = 13
+	diskAttachInitDelay      = 7 * time.Second
+	diskAttachFactor         = 1.4
+	diskAttachSteps          = 10
+	diskDetachInitDelay      = 7 * time.Second
+	diskDetachFactor         = 1.4
+	diskDetachSteps          = 10
 	volumeDescription        = "Created by OpenStack Cinder CSI driver"
 )
 
@@ -203,6 +203,12 @@ func (os *OpenStack) AttachVolume(instanceID, volumeID string) (string, error) {
 		return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err)
 	}
 
+	// Redundant WaitDiskAttached as a workaround for a race condition in the backend
+	err = os.WaitDiskAttached(instanceID, volumeID)
+	if err != nil {
+		return "", err
+	}
+
 	return volume.ID, nil
 }
 
@@ -264,6 +270,33 @@ func (os *OpenStack) WaitVolumeTargetStatus(volumeID string, tStatus []string) e
 	return waitErr
 }
 
+// WaitVolumeTargetStatusWithCustomBackoff waits for a volume to reach one of the target statuses using the given backoff
+func (os *OpenStack) WaitVolumeTargetStatusWithCustomBackoff(volumeID string, tStatus []string, backoff *wait.Backoff) error {
+	waitErr := wait.ExponentialBackoff(*backoff, func() (bool, error) {
+		vol, err := os.GetVolume(volumeID)
+		if err != nil {
+			return false, err
+		}
+		for _, t := range tStatus {
+			if vol.Status == t {
+				return true, nil
+			}
+		}
+		for _, eState := range volumeErrorStates {
+			if vol.Status == eState {
+				return false, fmt.Errorf("Volume is in Error State : %s", vol.Status)
+			}
+		}
+		return false, nil
+	})
+
+	if waitErr == wait.ErrWaitTimeout {
+		waitErr = fmt.Errorf("Timeout on waiting for volume %s status to be in %v", volumeID, tStatus)
+	}
+
+	return waitErr
+}
+
 // DetachVolume detaches given cinder volume from the compute
 func (os *OpenStack) DetachVolume(instanceID, volumeID string) error {
 	volume, err := os.GetVolume(volumeID)