# NOTE(review): This is a captured ProwJob object dump (the presence of
# `resourceVersion`, `uid`, `creationTimestamp` and a populated `status`
# stanza indicates `kubectl get -o yaml` output) whose newlines were lost:
# the whole document is collapsed onto four physical lines and is NOT
# parseable YAML in this form (inline "metadata: annotations: description: ..."
# nesting, flattened block scalars). Do not hand-edit the content below —
# re-export the object from the cluster / GCS artifacts, or restore the
# original line structure from the job definition in the scylla-operator repo.
#
# Observations for whoever restores this file:
#   * The top-level `apiVersion:` / `kind: ProwJob` lines expected on a
#     ProwJob resource are not visible here — presumably lost in the same
#     mangling; confirm against the original export.
#   * The "setup" container script contains `create -f - < /tmp/shared/kubeconfig.init`
#     where a heredoc manifest (`<<EOF ... EOF` with the KubernetesCluster
#     spec) and the steps that wait for provisioning and extract the
#     kubeconfig appear to have been dropped — the script is incomplete,
#     not merely reflowed. TODO: recover it from the job's CI source.
#   * `trap ` at the end of the first physical line continues as
#     `cleanup EXIT` at the start of the second — one shell statement split
#     across the line boundary, further evidence of whitespace mangling.
#   * Under the projected `kube-api-access` volume, `defaultMode: 400` is a
#     decimal integer (= octal 0620); if owner-read-only (0400) was intended
#     the decimal value would be 256 — NOTE(review): verify intent.
#   * Both container scripts run under `bash -euExo pipefail -O inherit_errexit`
#     (the "test" container additionally enables `extglob` for the
#     `${SO_IMAGE/:*([^:\/])@/@}` tag-stripping substitution).
metadata: annotations: description: Periodically runs serial tests for Scylla Operator latest on GKE. prow.k8s.io/context: "" prow.k8s.io/job: ci-scylla-operator-latest-e2e-gke-serial creationTimestamp: "2025-03-31T06:00:06Z" generation: 7 labels: app: scylla-operator created-by-prow: "true" prow.k8s.io/build-id: "1906587286787592192" prow.k8s.io/context: "" prow.k8s.io/id: e2ec8b9b-e30c-4207-8878-b0c0e80b63cd prow.k8s.io/job: ci-scylla-operator-latest-e2e-gke-serial prow.k8s.io/refs.base_ref: master prow.k8s.io/refs.org: scylladb prow.k8s.io/refs.repo: scylla-operator prow.k8s.io/type: periodic name: e2ec8b9b-e30c-4207-8878-b0c0e80b63cd namespace: prow-workspace resourceVersion: "613996444" uid: 40f0583e-420f-430d-9edc-fee146b716ef spec: agent: kubernetes cluster: default decoration_config: gcs_configuration: bucket: gs://scylla-operator-prow path_strategy: explicit gcs_credentials_secret: gcs-credentials github_api_endpoints: - http://ghproxy.prow.svc - https://api.github.com github_app_id: "112385" github_app_private_key_secret: key: cert name: github-token grace_period: 15m0s resources: clonerefs: requests: cpu: 100m initupload: requests: cpu: 100m place_entrypoint: requests: cpu: 100m sidecar: requests: cpu: 100m timeout: 2h0m0s utility_images: clonerefs: us-docker.pkg.dev/k8s-infra-prow/images/clonerefs:v20240802-66b115076 entrypoint: us-docker.pkg.dev/k8s-infra-prow/images/entrypoint:v20240802-66b115076 initupload: us-docker.pkg.dev/k8s-infra-prow/images/initupload:v20240802-66b115076 sidecar: us-docker.pkg.dev/k8s-infra-prow/images/sidecar:v20240802-66b115076 extra_refs: - base_ref: master org: scylladb path_alias: github.com/scylladb/scylla-operator repo: scylla-operator workdir: true job: ci-scylla-operator-latest-e2e-gke-serial max_concurrency: 1 namespace: prow-workspace pod_spec: containers: - args: - | function cleanup { touch /tmp/shared/setup.finished kubectl -n ci-clusters delete --wait=false --ignore-not-found kubernetescluster/"${POD_UID}" } trap 
cleanup EXIT kubectl -n ci-clusters create -f - < /tmp/shared/kubeconfig.init kubectl --kubeconfig=/tmp/shared/kubeconfig.init config set-context --current --namespace 'default-unexisting-namespace' # Sanity check. kubectl --kubeconfig=/tmp/shared/kubeconfig.init version -o yaml kubectl --kubeconfig=/tmp/shared/kubeconfig.init config view # Signal cluster bootstrap by sharing the kubeconfig. mv /tmp/shared/kubeconfig{.init,} set +x # TODO: Wait on active signal like updating a file every X seconds # so we can deal with the other container being OOM killed. echo "Waiting for test to finish..." until [[ -f "/tmp/shared/test.finished" ]]; do sleep 1; done echo "Test has finished." set -x command: - /usr/bin/bash - -euExo - pipefail - -O - inherit_errexit - -c env: - name: POD_UID valueFrom: fieldRef: fieldPath: metadata.uid image: quay.io/scylladb/scylla-operator-images:kube-tools imagePullPolicy: Always name: setup resources: limits: cpu: 100m memory: 200Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL seccompProfile: type: RuntimeDefault volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount/ name: kube-api-access - mountPath: /tmp/shared name: shared-data - args: - | trap 'touch /tmp/shared/test.finished' EXIT set +x echo "Waiting for cluster to be provisioned..." until [[ -f "${KUBECONFIG}" || -f /tmp/shared/setup.finished ]]; do sleep 1; done if [[ -f /tmp/shared/setup.finished ]]; then echo "Cluster provisioning failed. Exiting." exit 1 fi echo "Cluster provisioning has finished." 
set -x # Setup info and kubeconfig sanity check kubectl config view kubectl version -o yaml kubectl cluster-info function resolve-image { ( set -euEo pipefail local digest digest=$( skopeo inspect --raw docker://"${1}" | skopeo manifest-digest /dev/stdin ) echo "${1}@${digest}" ) } SO_IMAGE="$( resolve-image docker.io/scylladb/scylla-operator:latest )" # We need to strip the tag because skopeo doesn't support references with both a tag and a digest. SO_IMAGE="${SO_IMAGE/:*([^:\/])@/@}" export SO_IMAGE SO_SUITE=scylla-operator/conformance/serial export SO_SUITE SO_NODECONFIG_PATH="" export SO_NODECONFIG_PATH SO_CSI_DRIVER_PATH="" export SO_CSI_DRIVER_PATH SO_SCYLLACLUSTER_NODE_SERVICE_TYPE=Headless export SO_SCYLLACLUSTER_NODE_SERVICE_TYPE SO_SCYLLACLUSTER_NODES_BROADCAST_ADDRESS_TYPE=PodIP export SO_SCYLLACLUSTER_NODES_BROADCAST_ADDRESS_TYPE SO_SCYLLACLUSTER_CLIENTS_BROADCAST_ADDRESS_TYPE=PodIP export SO_SCYLLACLUSTER_CLIENTS_BROADCAST_ADDRESS_TYPE SO_SCYLLACLUSTER_STORAGECLASS_NAME="" export SO_SCYLLACLUSTER_STORAGECLASS_NAME timeout --verbose --signal INT --kill-after=100m 90m ./hack/.ci/run-e2e-gke-release.sh command: - /usr/bin/bash - -euExo - pipefail - -O - inherit_errexit - -O - extglob - -c env: - name: POD_UID valueFrom: fieldRef: fieldPath: metadata.uid - name: KUBECONFIG value: /tmp/shared/kubeconfig image: quay.io/scylladb/scylla-operator-images:kube-tools imagePullPolicy: Always name: test resources: limits: cpu: 100m memory: 1200Mi securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL seccompProfile: type: RuntimeDefault volumeMounts: - mountPath: /tmp/shared name: shared-data serviceAccountName: e2e volumes: - emptyDir: {} name: shared-data - name: kube-api-access projected: defaultMode: 400 sources: - serviceAccountToken: path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace prowjob_defaults: tenant_id: 
GlobalDefaultID report: true type: periodic status: build_id: "1906587286787592192" completionTime: "2025-03-31T06:43:29Z" description: Job succeeded. pendingTime: "2025-03-31T06:00:06Z" pod_name: e2ec8b9b-e30c-4207-8878-b0c0e80b63cd prev_report_states: gcsk8sreporter: success gcsreporter: success startTime: "2025-03-31T06:00:06Z" state: success url: https://prow.scylla-operator.scylladb.com/view/gs/scylla-operator-prow/logs/ci-scylla-operator-latest-e2e-gke-serial/1906587286787592192