metadata:
  annotations:
    description: Periodically runs parallel e2e tests for Scylla Operator v1.15 on GKE.
    prow.k8s.io/context: ""
    prow.k8s.io/job: ci-scylla-operator-v1.15-e2e-gke-parallel
  creationTimestamp: "2025-03-31T06:00:06Z"
  generation: 7
  labels:
    app: scylla-operator
    created-by-prow: "true"
    prow.k8s.io/build-id: "1906587288301735936"
    prow.k8s.io/context: ""
    prow.k8s.io/id: 028b0caa-effb-4690-934b-1fa75cd3d78e
    prow.k8s.io/job: ci-scylla-operator-v1.15-e2e-gke-parallel
    prow.k8s.io/refs.base_ref: v1.15
    prow.k8s.io/refs.org: scylladb
    prow.k8s.io/refs.repo: scylla-operator
    prow.k8s.io/type: periodic
  name: 028b0caa-effb-4690-934b-1fa75cd3d78e
  namespace: prow-workspace
  resourceVersion: "614012436"
  uid: c64ae101-1459-433d-98c3-2751829fc7cb
spec:
  agent: kubernetes
  cluster: default
  decoration_config:
    gcs_configuration:
      bucket: gs://scylla-operator-prow
      path_strategy: explicit
    gcs_credentials_secret: gcs-credentials
    github_api_endpoints:
    - http://ghproxy.prow.svc
    - https://api.github.com
    github_app_id: "112385"
    github_app_private_key_secret:
      key: cert
      name: github-token
    grace_period: 15m0s
    resources:
      clonerefs:
        requests:
          cpu: 100m
      initupload:
        requests:
          cpu: 100m
      place_entrypoint:
        requests:
          cpu: 100m
      sidecar:
        requests:
          cpu: 100m
    timeout: 2h0m0s
    utility_images:
      clonerefs: us-docker.pkg.dev/k8s-infra-prow/images/clonerefs:v20240802-66b115076
      entrypoint: us-docker.pkg.dev/k8s-infra-prow/images/entrypoint:v20240802-66b115076
      initupload: us-docker.pkg.dev/k8s-infra-prow/images/initupload:v20240802-66b115076
      sidecar: us-docker.pkg.dev/k8s-infra-prow/images/sidecar:v20240802-66b115076
  extra_refs:
  - base_ref: v1.15
    org: scylladb
    path_alias: github.com/scylladb/scylla-operator
    repo: scylla-operator
    workdir: true
  job: ci-scylla-operator-v1.15-e2e-gke-parallel
  max_concurrency: 1
  namespace: prow-workspace
  pod_spec:
    containers:
    - args:
      - |
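        # Runs on any exit: signal the test container (setup.finished without a
        # kubeconfig means provisioning failed) and queue async deletion of the
        # ephemeral cluster and bucket; --ignore-not-found keeps this idempotent.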
        function cleanup {
          touch /tmp/shared/setup.finished
          kubectl -n ci-clusters delete --wait=false --ignore-not-found kubernetescluster/"${POD_UID}"
          kubectl -n ci-clusters delete --wait=false --ignore-not-found storagebuckets/"${POD_UID}"
        }
        trap cleanup EXIT

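        # Request an ephemeral GKE cluster from the internal ci.scylladb.com
        # controller: a small tainted "infra" pool plus a "workers" pool with a
        # local NVMe SSD and a static CPU manager for Scylla pods. The 4h TTL is
        # a backstop in case cleanup never runs.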
        kubectl -n ci-clusters create -f - <<EOF
        apiVersion: ci.scylladb.com/v1alpha1
        kind: KubernetesCluster
        metadata:
          name: "${POD_UID}"
        spec:
          version: "1.31"
          ttlSeconds: 14400
          credentialsSecret:
            name: "${POD_UID}-kubeconfig"
          type: GKE
          gke:
            location: us-central1
            nodePools:
            - name: infra
              locations:
              - us-central1-a
              config:
                machineType: e2-small
                diskType: pd-standard
                diskSizeGB: 40
                labels:
                  pool: infra
                taints:
                - key: dedicated-pool
                  value: infra
                  effect: NO_EXECUTE
              initialNodeCount: 1
            - name: workers
              locations:
              - us-central1-a
              config:
                machineType: c2d-standard-8
                diskType: pd-balanced
                diskSizeGB: 40
                localNVMeSSDBlockConfig:
                  localSSDCount: 1
                labels:
                  pool: workers
                  scylla.scylladb.com/node-type: scylla
                kubeletConfig:
                  cpuManagerPolicy: static
              initialNodeCount: 1
        EOF

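        # Poll until the cluster is fully reconciled: not Degraded, not
        # Progressing, and bootstrapped. The until-loop retries kubectl wait on
        # transient failures; the outer timeout bounds the whole wait to 15m.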
        timeout -v 15m bash -c '
          until kubectl -n ci-clusters wait --for=condition=Degraded=False kubernetescluster/"${POD_UID}" --timeout=15m &&
                kubectl -n ci-clusters wait --for=condition=Progressing=False kubernetescluster/"${POD_UID}" --timeout=15m &&
                kubectl -n ci-clusters wait --for=condition=ClusterBootstrapped=True kubernetescluster/"${POD_UID}" --timeout=15m; do
            sleep 1
          done
        '

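        # Request an ephemeral GCS bucket with the same 4h TTL for tests that
        # need object storage.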
        kubectl -n ci-clusters create -f - <<EOF
        apiVersion: ci.scylladb.com/v1alpha1
        kind: StorageBucket
        metadata:
          name: "${POD_UID}"
        spec:
          ttlSeconds: 14400
          type: GCS
          gcs:
            location: us-central1
          credentialsSecret:
            name: "${POD_UID}-gcs-service-account"
        EOF

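        # Wait for the bucket to be ready, then hand its name and service
        # account credentials to the test container via the shared volume.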
        timeout -v 5m bash -c '
          until kubectl -n ci-clusters wait --for=condition=Degraded=False storagebucket/"${POD_UID}" --timeout=5m &&
                kubectl -n ci-clusters wait --for=condition=Progressing=False storagebucket/"${POD_UID}" --timeout=5m; do
            sleep 1
          done
        '
        kubectl -n ci-clusters get storagebuckets/"${POD_UID}" --template='{{ .status.bucketName }}' > /tmp/shared/gcs-bucket-name
        kubectl -n ci-clusters get secret/"${POD_UID}-gcs-service-account" --template='{{ index .data "gcs-service-account.json" }}' | base64 -d > /tmp/shared/gcs-service-account.json

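        # Extract the kubeconfig and point its current context at a namespace
        # that does not exist, presumably so nothing in the suite can silently
        # rely on the kubeconfig's default namespace.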
        kubectl -n ci-clusters get secret/"${POD_UID}-kubeconfig" --template='{{ .data.kubeconfig }}' | base64 -d > /tmp/shared/kubeconfig.init
        kubectl --kubeconfig=/tmp/shared/kubeconfig.init config set-context --current --namespace 'default-unexisting-namespace'

        # Sanity check.
        kubectl --kubeconfig=/tmp/shared/kubeconfig.init version -o yaml
        kubectl --kubeconfig=/tmp/shared/kubeconfig.init config view

        # Signal cluster bootstrap by sharing the kubeconfig.
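        # The rename happens within the same emptyDir volume, so it is atomic
        # and the test container never observes a partially written kubeconfig.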
        mv /tmp/shared/kubeconfig{.init,}

        set +x
        # TODO: Wait on an active signal, e.g. a file touched every X seconds,
        #       so we can detect the other container being OOM killed.
        echo "Waiting for test to finish..."
        until [[ -f "/tmp/shared/test.finished" ]]; do sleep 1; done
        echo "Test has finished."
        set -x
      command:
      - /usr/bin/bash
      - -euExo
      - pipefail
      - -O
      - inherit_errexit
      - -c
      env:
      - name: POD_UID
        valueFrom:
          fieldRef:
            fieldPath: metadata.uid
      image: quay.io/scylladb/scylla-operator-images:kube-tools
      imagePullPolicy: Always
      name: setup
      resources:
        limits:
          cpu: 100m
          memory: 200Mi
      securityContext:
        allowPrivilegeEscalation: false
        capabilities:
          drop:
          - ALL
        seccompProfile:
          type: RuntimeDefault
      volumeMounts:
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount/
        name: kube-api-access
      - mountPath: /tmp/shared
        name: shared-data
    - args:
      - |
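        # Always signal the setup container on exit so it can tear down the
        # ephemeral cluster and bucket.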
        trap 'touch /tmp/shared/test.finished' EXIT

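        # Block until the setup container publishes the kubeconfig, or bail out
        # if it finished without producing one.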
        set +x
        echo "Waiting for cluster to be provisioned..."
        until [[ -f "${KUBECONFIG}" || -f /tmp/shared/setup.finished ]]; do sleep 1; done
        if [[ -f /tmp/shared/setup.finished ]]; then
          echo "Cluster provisioning failed. Exiting."
          exit 1
        fi
        echo "Cluster provisioning has finished."
        set -x

        # Print setup info and sanity-check the kubeconfig.
        kubectl config view
        kubectl version -o yaml
        kubectl cluster-info

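        # Resolve a tagged image reference to a digest-pinned one so the suite
        # tests exactly one build, even if the tag moves mid-run.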
        function resolve-image {
          (
            set -euEo pipefail
            local digest
            digest=$( skopeo inspect --raw docker://"${1}" | skopeo manifest-digest /dev/stdin )
            echo "${1}@${digest}"
          )
        }
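        # e.g. resolve-image docker.io/scylladb/scylla-operator:1.15
        #      -> docker.io/scylladb/scylla-operator:1.15@sha256:<digest>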

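        # Point the e2e harness at the checked-in NodeConfig and local CSI
        # driver manifests.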
        SO_NODECONFIG_PATH="./hack/.ci/manifests/cluster/nodeconfig.yaml"
        export SO_NODECONFIG_PATH
        SO_CSI_DRIVER_PATH="./hack/.ci/manifests/namespaces/local-csi-driver/"
        export SO_CSI_DRIVER_PATH

        SO_IMAGE="$( resolve-image docker.io/scylladb/scylla-operator:1.15 )"
        # We need to strip the tag because skopeo doesn't support references with both a tag and a digest.
        SO_IMAGE="${SO_IMAGE/:*([^:\/])@/@}"
        export SO_IMAGE

        SO_SUITE=scylla-operator/conformance/parallel
        export SO_SUITE
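        # Run ScyllaClusters with a Headless node Service and Pod IPs as both
        # the nodes' and clients' broadcast addresses.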
        SO_SCYLLACLUSTER_NODE_SERVICE_TYPE=Headless
        export SO_SCYLLACLUSTER_NODE_SERVICE_TYPE
        SO_SCYLLACLUSTER_NODES_BROADCAST_ADDRESS_TYPE=PodIP
        export SO_SCYLLACLUSTER_NODES_BROADCAST_ADDRESS_TYPE
        SO_SCYLLACLUSTER_CLIENTS_BROADCAST_ADDRESS_TYPE=PodIP
        export SO_SCYLLACLUSTER_CLIENTS_BROADCAST_ADDRESS_TYPE
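        # Hand the suite the bucket and credentials provisioned by the setup
        # container.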
        SO_BUCKET_NAME="$( cat /tmp/shared/gcs-bucket-name )"
        export SO_BUCKET_NAME
        SO_GCS_SERVICE_ACCOUNT_CREDENTIALS_PATH=/tmp/shared/gcs-service-account.json
        export SO_GCS_SERVICE_ACCOUNT_CREDENTIALS_PATH
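        # Bound the e2e run explicitly; this matches the job's 2h decoration
        # timeout.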
        timeout -v 2h ./hack/.ci/run-e2e-gke-release.sh
      command:
      - /usr/bin/bash
      - -euExo
      - pipefail
      - -O
      - inherit_errexit
      - -O
      - extglob
      - -c
      env:
      - name: POD_UID
        valueFrom:
          fieldRef:
            fieldPath: metadata.uid
      - name: KUBECONFIG
        value: /tmp/shared/kubeconfig
      image: quay.io/scylladb/scylla-operator-images:kube-tools
      imagePullPolicy: Always
      name: test
      resources:
        limits:
          cpu: 100m
          memory: 1200Mi
      securityContext:
        allowPrivilegeEscalation: false
        capabilities:
          drop:
          - ALL
        seccompProfile:
          type: RuntimeDefault
      volumeMounts:
      - mountPath: /tmp/shared
        name: shared-data
    serviceAccountName: e2e
    volumes:
    - emptyDir: {}
      name: shared-data
    - name: kube-api-access
      projected:
        defaultMode: 0400
        sources:
        - serviceAccountToken:
            path: token
        - configMap:
            items:
            - key: ca.crt
              path: ca.crt
            name: kube-root-ca.crt
        - downwardAPI:
            items:
            - fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace
              path: namespace
  prowjob_defaults:
    tenant_id: GlobalDefaultID
  report: true
  type: periodic
status:
  build_id: "1906587288301735936"
  completionTime: "2025-03-31T07:05:14Z"
  description: Job succeeded.
  pendingTime: "2025-03-31T06:00:07Z"
  pod_name: 028b0caa-effb-4690-934b-1fa75cd3d78e
  prev_report_states:
    gcsk8sreporter: success
    gcsreporter: success
  startTime: "2025-03-31T06:00:06Z"
  state: success
  url: https://prow.scylla-operator.scylladb.com/view/gs/scylla-operator-prow/logs/ci-scylla-operator-v1.15-e2e-gke-parallel/1906587288301735936