---
# NOTE(review): This ProwJob manifest was recovered from a whitespace-mangled
# source (the whole document had been flattened onto two physical lines, which
# is invalid YAML). The block structure below is reconstructed from the
# prow.k8s.io ProwJob schema; every key and value is preserved verbatim from
# the recovered text. The leading `apiVersion:`/`kind:` fields were not
# present in the recovered text — confirm against the original job definition.
metadata:
  annotations:
    description: Periodically runs serial tests for Scylla Operator latest on OpenShift on AWS.
    prow.k8s.io/context: ""
    prow.k8s.io/job: ci-scylla-operator-latest-e2e-openshift-aws-serial
  creationTimestamp: "2025-03-31T06:00:06Z"
  generation: 8
  labels:
    app: scylla-operator
    created-by-prow: "true"
    prow.k8s.io/build-id: "1906587287781642240"
    prow.k8s.io/context: ""
    prow.k8s.io/id: 53d9ad19-4683-4608-808f-6e2943a7bc13
    prow.k8s.io/job: ci-scylla-operator-latest-e2e-openshift-aws-serial
    prow.k8s.io/refs.base_ref: master
    prow.k8s.io/refs.org: scylladb
    prow.k8s.io/refs.repo: scylla-operator
    prow.k8s.io/type: periodic
  name: 53d9ad19-4683-4608-808f-6e2943a7bc13
  namespace: prow-workspace
  resourceVersion: "614043813"
  uid: 3cf8d8f2-d637-486f-9047-ad4fead8f0ad
spec:
  agent: kubernetes
  cluster: default
  decoration_config:
    gcs_configuration:
      bucket: gs://scylla-operator-prow
      path_strategy: explicit
    gcs_credentials_secret: gcs-credentials
    github_api_endpoints:
      - http://ghproxy.prow.svc
      - https://api.github.com
    github_app_id: "112385"
    github_app_private_key_secret:
      key: cert
      name: github-token
    grace_period: 15m0s
    resources:
      clonerefs:
        requests:
          cpu: 100m
      initupload:
        requests:
          cpu: 100m
      place_entrypoint:
        requests:
          cpu: 100m
      sidecar:
        requests:
          cpu: 100m
    timeout: 4h0m0s
    utility_images:
      clonerefs: us-docker.pkg.dev/k8s-infra-prow/images/clonerefs:v20240802-66b115076
      entrypoint: us-docker.pkg.dev/k8s-infra-prow/images/entrypoint:v20240802-66b115076
      initupload: us-docker.pkg.dev/k8s-infra-prow/images/initupload:v20240802-66b115076
      sidecar: us-docker.pkg.dev/k8s-infra-prow/images/sidecar:v20240802-66b115076
  extra_refs:
    - base_ref: master
      org: scylladb
      path_alias: github.com/scylladb/scylla-operator
      repo: scylla-operator
      workdir: true
  job: ci-scylla-operator-latest-e2e-openshift-aws-serial
  max_concurrency: 1
  namespace: prow-workspace
  pod_spec:
    containers:
      - args:
          # NOTE(review): the line breaks inside this block scalar were lost in
          # the mangled source; they are reconstructed here from shell syntax
          # (one statement per recovered command). Verify against the original
          # job config before relying on exact formatting.
          - |
            source "./hack/lib/kube.sh"

            # Best-effort teardown: signal setup completion and detach the
            # provisioned cluster object without waiting for deletion.
            function cleanup {
              touch /tmp/shared/setup.finished
              kubectl -n ci-clusters delete --wait=false --ignore-not-found kubernetescluster/"${POD_UID}"
            }
            trap cleanup EXIT

            kubectl -n ci-clusters create -f - < "${ARTIFACTS}/.ci/manifests/cluster/${POD_UID}-openshift-install-config.cm.yaml"

            # Wait (up to 2h) for the cluster object to settle: not Degraded,
            # not Progressing, and bootstrapped.
            timeout -v 2h bash -c 'until kubectl -n ci-clusters wait --for=condition=Degraded=False kubernetescluster/"${POD_UID}" --timeout=2h && kubectl -n ci-clusters wait --for=condition=Progressing=False kubernetescluster/"${POD_UID}" --timeout=2h && kubectl -n ci-clusters wait --for=condition=ClusterBootstrapped=True kubernetescluster/"${POD_UID}" --timeout=2h; do sleep 1; done'

            kubectl -n ci-clusters get secret/"${POD_UID}-kubeconfig" --template='{{ .data.kubeconfig }}' | base64 -d > /tmp/shared/kubeconfig.init
            kubectl --kubeconfig=/tmp/shared/kubeconfig.init config set-context --current --namespace 'default-unexisting-namespace'

            # Sanity check.
            kubectl --kubeconfig=/tmp/shared/kubeconfig.init version -o yaml
            kubectl --kubeconfig=/tmp/shared/kubeconfig.init config view

            kubectl --kubeconfig=/tmp/shared/kubeconfig.init label machineconfigpool worker cpumanager-policy=static

            # NOTE(review): the recovered source is TRUNCATED here — the final
            # command began a manifest injection ("create -f - <") whose
            # redirect/heredoc body is missing. Restore it from the original
            # job definition before use.
            kubectl --kubeconfig=/tmp/shared/kubeconfig.init create -f - <