Mirror of https://github.com/actions/actions-runner-controller.git, synced 2025-12-11 12:06:57 +00:00

Compare commits

8 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 6762c5c096 |  |
|  | fea1457f12 |  |
|  | 473295e3fc |  |
|  | 9f6f962fc7 |  |
|  | 2a475f25c7 |  |
|  | dd9f25ea78 |  |
|  | b8e4eee904 |  |
|  | edbdef8d20 |  |
```diff
@@ -1464,7 +1464,7 @@ spec:
       volumeMounts:
       - name: var-lib-docker
         mountPath: /var/lib/docker
-  volumeClaimtemplates:
+  volumeClaimTemplates:
   - metadata:
       name: var-lib-docker
     spec:
```
```diff
@@ -4,6 +4,7 @@
 * [Installation](#installation)
   * [InternalError when calling webhook: context deadline exceeded](#internalerror-when-calling-webhook-context-deadline-exceeded)
   * [Invalid header field value](#invalid-header-field-value)
+  * [Helm chart install failure: certificate signed by unknown authority](#helm-chart-install-failure-certificate-signed-by-unknown-authority)
 * [Operations](#operations)
   * [Stuck runner kind or backing pod](#stuck-runner-kind-or-backing-pod)
   * [Delay in jobs being allocated to runners](#delay-in-jobs-being-allocated-to-runners)
```
````diff
@@ -105,6 +106,37 @@ Your base64'ed PAT token has a new line at the end, it needs to be created witho
 * `echo -n $TOKEN | base64`
 * Create the secret as described in the docs using the shell and documented flags
 
+### Helm chart install failure: certificate signed by unknown authority
+
+**Problem**
+
+```
+Error: UPGRADE FAILED: failed to create resource: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/mutate?timeout=10s": x509: certificate signed by unknown authority
+```
+
+Helm fails while creating one of the resources defined in the ARC chart, because cert-manager's webhook is not working correctly due to a missing or invalid CA certificate.
+
+Tail the logs of `cert-manager-cainjector` and you'll see it failing with an error like:
+
+```
+$ kubectl -n cert-manager logs cert-manager-cainjector-7cdbb9c945-g6bt4
+I0703 03:31:55.159339 1 start.go:91] "starting" version="v1.1.1" revision="3ac7418070e22c87fae4b22603a6b952f797ae96"
+I0703 03:31:55.615061 1 leaderelection.go:243] attempting to acquire leader lease kube-system/cert-manager-cainjector-leader-election...
+I0703 03:32:10.738039 1 leaderelection.go:253] successfully acquired lease kube-system/cert-manager-cainjector-leader-election
+I0703 03:32:10.739941 1 recorder.go:52] cert-manager/controller-runtime/manager/events "msg"="Normal" "message"="cert-manager-cainjector-7cdbb9c945-g6bt4_88e4bc70-eded-4343-a6fb-0ddd6434eb55 became leader" "object"={"kind":"ConfigMap","namespace":"kube-system","name":"cert-manager-cainjector-leader-election","uid":"942a021e-364c-461a-978c-f54a95723cdc","apiVersion":"v1","resourceVersion":"1576"} "reason"="LeaderElection"
+E0703 03:32:11.192128 1 start.go:119] cert-manager/ca-injector "msg"="manager goroutine exited" "error"=null
+I0703 03:32:12.339197 1 request.go:645] Throttling request took 1.047437675s, request: GET:https://10.96.0.1:443/apis/storage.k8s.io/v1beta1?timeout=32s
+E0703 03:32:13.143790 1 start.go:151] cert-manager/ca-injector "msg"="Error registering certificate based controllers. Retrying after 5 seconds." "error"="no matches for kind \"MutatingWebhookConfiguration\" in version \"admissionregistration.k8s.io/v1beta1\""
+Error: error registering secret controller: no matches for kind "MutatingWebhookConfiguration" in version "admissionregistration.k8s.io/v1beta1"
+```
+
+**Solution**
+
+Your cluster is running Kubernetes 1.22 or greater, which no longer serves the legacy `admissionregistration.k8s.io/v1beta1` API, while your `cert-manager` is out of date and still trying to use that legacy API.
+
+In many cases, downgrading Kubernetes is not an option, so upgrade `cert-manager` to a more recent version that supports the specific Kubernetes version you're running.
+
+See https://cert-manager.io/docs/installation/supported-releases/ for the list of available cert-manager versions.
+
 ## Operations
````
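For reference, upgrading an existing cert-manager installation with Helm typically looks like the following (a sketch; the release name and namespace are assumptions, and the target version should come from the supported-releases page, v1.8.2 being the version this PR pins for its own E2E tests):

```bash
# Refresh the jetstack repo that hosts the cert-manager chart
helm repo add jetstack https://charts.jetstack.io
helm repo update

# Upgrade (or install) cert-manager; v1.8.x serves admissionregistration.k8s.io/v1,
# which Kubernetes 1.22+ requires.
helm upgrade --install cert-manager jetstack/cert-manager \
  --namespace cert-manager \
  --set installCRDs=true \
  --version v1.8.2
```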
acceptance/argotunnel.sh (new executable file, 97 lines)

```bash
#!/usr/bin/env bash

# See https://developers.cloudflare.com/cloudflare-one/tutorials/many-cfd-one-tunnel/

kubectl create ns tunnel || :

kubectl -n tunnel delete secret tunnel-credentials || :

kubectl -n tunnel create secret generic tunnel-credentials \
  --from-file=credentials.json=$HOME/.cloudflared/${TUNNEL_ID}.json || :

cat <<MANIFEST | kubectl -n tunnel ${OP} -f -
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cloudflared
spec:
  selector:
    matchLabels:
      app: cloudflared
  replicas: 2 # You could also consider elastic scaling for this deployment
  template:
    metadata:
      labels:
        app: cloudflared
    spec:
      containers:
      - name: cloudflared
        image: cloudflare/cloudflared:latest
        args:
        - tunnel
        # Points cloudflared to the config file, which configures what
        # cloudflared will actually do. This file is created by a ConfigMap
        # below.
        - --config
        - /etc/cloudflared/config/config.yaml
        - run
        livenessProbe:
          httpGet:
            # Cloudflared has a /ready endpoint which returns 200 if and only if
            # it has an active connection to the edge.
            path: /ready
            port: 2000
          failureThreshold: 1
          initialDelaySeconds: 10
          periodSeconds: 10
        volumeMounts:
        - name: config
          mountPath: /etc/cloudflared/config
          readOnly: true
        # Each tunnel has an associated "credentials file" which authorizes machines
        # to run the tunnel. cloudflared will read this file from its local filesystem,
        # and it'll be stored in a k8s secret.
        - name: creds
          mountPath: /etc/cloudflared/creds
          readOnly: true
      volumes:
      - name: creds
        secret:
          secretName: tunnel-credentials
      # Create a config.yaml file from the ConfigMap below.
      - name: config
        configMap:
          name: cloudflared
          items:
          - key: config.yaml
            path: config.yaml
---
# This ConfigMap is just a way to define the cloudflared config.yaml file in k8s.
# It's useful to define it in k8s, rather than as a stand-alone .yaml file, because
# this lets you use various k8s templating solutions (e.g. Helm charts) to
# parameterize your config, instead of just using string literals.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cloudflared
data:
  config.yaml: |
    # Name of the tunnel you want to run
    tunnel: ${TUNNEL_NAME}
    credentials-file: /etc/cloudflared/creds/credentials.json
    # Serves the metrics server under /metrics and the readiness server under /ready
    metrics: 0.0.0.0:2000
    # Autoupdates applied in a k8s pod will be lost when the pod is removed or restarted, so
    # autoupdate doesn't make sense in Kubernetes. However, outside of Kubernetes, we strongly
    # recommend using autoupdate.
    no-autoupdate: true
    ingress:
    # The first rule proxies traffic to the ARC github-webhook-server Service
    - hostname: ${TUNNEL_HOSTNAME}
      service: http://actions-runner-controller-github-webhook-server.actions-runner-system:80
    # This rule matches any traffic which didn't match a previous rule, and responds with HTTP 404.
    - service: http_status:404
MANIFEST

kubectl -n tunnel delete po -l app=cloudflared || :
```
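A sketch of how this script might be driven locally (the tunnel has to exist beforehand; `arc-e2e` and the hostname are placeholders, and `OP` is `apply` or `delete` as consumed by the `kubectl ... ${OP} -f -` line above):

```bash
# One-time: create a named tunnel; this writes ~/.cloudflared/<TUNNEL_ID>.json
cloudflared tunnel create arc-e2e

# Route a DNS hostname through the tunnel (hostname is a placeholder)
cloudflared tunnel route dns arc-e2e arc-e2e.example.com

# Deploy the in-cluster cloudflared pods; use OP=delete to tear them down
OP=apply \
TUNNEL_ID=<uuid from "cloudflared tunnel create"> \
TUNNEL_NAME=arc-e2e \
TUNNEL_HOSTNAME=arc-e2e.example.com \
  bash acceptance/argotunnel.sh
```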
```diff
@@ -51,6 +51,9 @@ if [ "${tool}" == "helm" ]; then
     --set image.tag=${VERSION} \
     --set podAnnotations.test-id=${TEST_ID} \
     --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
+    --set imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
+    --set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
+    --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
     -f ${VALUES_FILE}
   set +v
   # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
```
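These flags assume the referenced pull secret already exists in the target namespace. Creating one for a private registry typically looks like this (a sketch; the namespace, secret name, and registry values are placeholders):

```bash
kubectl -n actions-runner-system create secret docker-registry my-pull-secret \
  --docker-server=registry.example.com \
  --docker-username=myuser \
  --docker-password="${REGISTRY_TOKEN}"
```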
```diff
@@ -1,6 +1,13 @@
 # Set actions-runner-controller settings for testing
 logLevel: "-4"
+imagePullSecrets:
+- name:
+image:
+  actionsRunnerImagePullSecrets:
+  - name:
 githubWebhookServer:
+  imagePullSecrets:
+  - name:
   logLevel: "-4"
   enabled: true
   labels: {}
```
```diff
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.19.1
+version: 0.20.0
 
 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.24.1
+appVersion: 0.25.0
 
 home: https://github.com/actions-runner-controller/actions-runner-controller
 
```
```diff
@@ -67,25 +67,6 @@ func annotatePodOnce(ctx context.Context, c client.Client, log logr.Logger, pod
 
     return updated, nil
 }
-func labelPod(ctx context.Context, c client.Client, log logr.Logger, pod *corev1.Pod, k, v string) (*corev1.Pod, error) {
-    if pod == nil {
-        return nil, nil
-    }
-
-    updated := pod.DeepCopy()
-    if updated.Labels == nil {
-        updated.Labels = map[string]string{}
-    }
-    updated.Labels[k] = v
-    if err := c.Patch(ctx, updated, client.MergeFrom(pod)); err != nil {
-        log.Error(err, fmt.Sprintf("Failed to patch pod to have %s annotation", k))
-        return nil, err
-    }
-
-    log.V(2).Info("Labeled pod", "key", k, "value", v)
-
-    return updated, nil
-}
-
 // If the first return value is nil, it's safe to delete the runner pod.
 func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, log logr.Logger, ghClient *github.Client, c client.Client, enterprise, organization, repository, runner string, pod *corev1.Pod) (*ctrl.Result, error) {
@@ -210,7 +191,7 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
     }
 
     if runnerBusy {
-        _, err := labelPod(ctx, c, log, pod, AnnotationKeyUnregistrationFailureMessage, runnerUnregistrationFailureMessage)
+        _, err := annotatePodOnce(ctx, c, log, pod, AnnotationKeyUnregistrationFailureMessage, runnerUnregistrationFailureMessage)
         if err != nil {
             return &ctrl.Result{}, err
         }
```
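The call-site swap is the substance of this change: the key being written, `AnnotationKeyUnregistrationFailureMessage`, names an annotation, and Kubernetes label values are limited to 63 characters from a restricted character set, so an arbitrary unregistration error message cannot reliably be stored as a label. With the call gone, the `labelPod` helper was dead code. `annotatePodOnce` itself is defined outside this hunk; below is a minimal sketch of what an annotate-once helper plausibly looks like, inferred only from its name and call site (the body is an assumption, not the repository's actual code):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// annotatePodOnceSketch writes the annotation k=v only if it is not already
// present, so the first recorded failure message survives later reconciles.
// This is a sketch, not the repository's actual annotatePodOnce.
func annotatePodOnceSketch(ctx context.Context, c client.Client, pod *corev1.Pod, k, v string) (*corev1.Pod, error) {
	if pod == nil {
		return nil, nil
	}

	if _, ok := pod.Annotations[k]; ok {
		// Already annotated: keep the original value.
		return pod, nil
	}

	updated := pod.DeepCopy()
	if updated.Annotations == nil {
		updated.Annotations = map[string]string{}
	}
	updated.Annotations[k] = v

	// Merge-patch only the annotation diff against the original pod.
	if err := c.Patch(ctx, updated, client.MergeFrom(pod)); err != nil {
		return nil, err
	}

	return updated, nil
}
```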
```diff
@@ -165,6 +165,8 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
             return ctrl.Result{}, err
         }
 
+        log.V(1).Info("Updated runnerreplicaset due to selector change")
+
         // At this point, we are already sure that there's no need to create a new replicaset
         // as the runner template hash is not changed.
         //
@@ -182,7 +184,14 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
     //
     // If we missed taking the EffectiveTime diff into account, you might end up experiencing scale-ups being delayed scale-down.
     // See https://github.com/actions-runner-controller/actions-runner-controller/pull/1477#issuecomment-1164154496
-    if currentDesiredReplicas != newDesiredReplicas || newestSet.Spec.EffectiveTime != rd.Spec.EffectiveTime {
+    var et1, et2 time.Time
+    if newestSet.Spec.EffectiveTime != nil {
+        et1 = newestSet.Spec.EffectiveTime.Time
+    }
+    if rd.Spec.EffectiveTime != nil {
+        et2 = rd.Spec.EffectiveTime.Time
+    }
+    if currentDesiredReplicas != newDesiredReplicas || et1 != et2 {
         newestSet.Spec.Replicas = &newDesiredReplicas
         newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime
 
@@ -192,6 +201,13 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
             return ctrl.Result{}, err
         }
 
+        log.V(1).Info("Updated runnerreplicaset due to spec change",
+            "currentDesiredReplicas", currentDesiredReplicas,
+            "newDesiredReplicas", newDesiredReplicas,
+            "currentEffectiveTime", newestSet.Spec.EffectiveTime,
+            "newEffectiveTime", rd.Spec.EffectiveTime,
+        )
+
         return ctrl.Result{}, err
     }
 
```
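For background on the condition change: `Spec.EffectiveTime` is a `*metav1.Time`, and the removed line compared the two pointers rather than the timestamps, so two separately allocated but equal times always compared unequal and could trigger a replicaset update on every reconcile. The added code dereferences into `time.Time` values first, treating nil as the zero time. A standalone illustration of the difference:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := time.Now()

	// Two separately allocated *metav1.Time values wrapping the same instant.
	a := &metav1.Time{Time: now}
	b := &metav1.Time{Time: now}

	// Pointer comparison, as in the removed condition: always unequal here,
	// because the two pointers refer to distinct allocations.
	fmt.Println(a != b) // true

	// Value comparison after a nil-safe dereference, as in the added code.
	var et1, et2 time.Time
	if a != nil {
		et1 = a.Time
	}
	if b != nil {
		et2 = b.Time
	}
	fmt.Println(et1 != et2) // false: the timestamps are equal
}
```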
```diff
@@ -21,53 +21,7 @@ const (
 )
 
 var (
-    controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
-    controllerImageTag  = "e2e"
-    controllerImage     = testing.Img(controllerImageRepo, controllerImageTag)
-    runnerImageRepo     = "actionsrunnercontrollere2e/actions-runner"
-    runnerDindImageRepo = "actionsrunnercontrollere2e/actions-runner-dind"
-    runnerImageTag      = "e2e"
-    runnerImage         = testing.Img(runnerImageRepo, runnerImageTag)
-    runnerDindImage     = testing.Img(runnerDindImageRepo, runnerImageTag)
-
-    prebuildImages = []testing.ContainerImage{
-        controllerImage,
-        runnerImage,
-        runnerDindImage,
-    }
-
-    builds = []testing.DockerBuild{
-        {
-            Dockerfile:   "../../Dockerfile",
-            Args:         []testing.BuildArg{},
-            Image:        controllerImage,
-            EnableBuildX: true,
-        },
-        {
-            Dockerfile: "../../runner/actions-runner.dockerfile",
-            Args: []testing.BuildArg{
-                {
-                    Name:  "RUNNER_VERSION",
-                    Value: "2.294.0",
-                },
-            },
-            Image:        runnerImage,
-            EnableBuildX: true,
-        },
-        {
-            Dockerfile: "../../runner/actions-runner-dind.dockerfile",
-            Args: []testing.BuildArg{
-                {
-                    Name:  "RUNNER_VERSION",
-                    Value: "2.294.0",
-                },
-            },
-            Image:        runnerDindImage,
-            EnableBuildX: true,
-        },
-    }
-
-    certManagerVersion = "v1.1.1"
-
+    certManagerVersion = "v1.8.2"
+
     images = []testing.ContainerImage{
         testing.Img("docker", "dind"),
@@ -77,11 +31,6 @@ var (
         testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
     }
 
-    commonScriptEnv = []string{
-        "SYNC_PERIOD=" + "30s",
-        "RUNNER_TAG=" + runnerImageTag,
-    }
-
     testResultCMNamePrefix = "test-result-"
 )
 
@@ -122,16 +71,31 @@ func TestE2E(t *testing.T) {
         t.Skip("Skipped as -short is set")
     }
 
+    k8sMinorVer := os.Getenv("ARC_E2E_KUBE_VERSION")
     skipRunnerCleanUp := os.Getenv("ARC_E2E_SKIP_RUNNER_CLEANUP") != ""
     retainCluster := os.Getenv("ARC_E2E_RETAIN_CLUSTER") != ""
     skipTestIDCleanUp := os.Getenv("ARC_E2E_SKIP_TEST_ID_CLEANUP") != ""
+    skipArgoTunnelCleanUp := os.Getenv("ARC_E2E_SKIP_ARGO_TUNNEL_CLEAN_UP") != ""
 
-    env := initTestEnv(t)
+    vars := buildVars(os.Getenv("ARC_E2E_IMAGE_REPO"))
+
+    env := initTestEnv(t, k8sMinorVer, vars)
+    if vt := os.Getenv("ARC_E2E_VERIFY_TIMEOUT"); vt != "" {
+        var err error
+        env.VerifyTimeout, err = time.ParseDuration(vt)
+        if err != nil {
+            t.Fatalf("Failed to parse duration %q: %v", vt, err)
+        }
+    }
 
     t.Run("build and load images", func(t *testing.T) {
         env.buildAndLoadImages(t)
     })
 
+    if t.Failed() {
+        return
+    }
+
     t.Run("install cert-manager", func(t *testing.T) {
         env.installCertManager(t)
     })
@@ -159,6 +123,16 @@ func TestE2E(t *testing.T) {
         env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)
     })
 
+    t.Run("install argo-tunnel", func(t *testing.T) {
+        env.installArgoTunnel(t)
+    })
+
+    if !skipArgoTunnelCleanUp {
+        t.Cleanup(func() {
+            env.uninstallArgoTunnel(t)
+        })
+    }
+
     t.Run("deploy runners", func(t *testing.T) {
         env.deploy(t, RunnerSets, testID)
     })
@@ -170,7 +144,7 @@ func TestE2E(t *testing.T) {
     }
 
     t.Run("install edge actions-runner-controller", func(t *testing.T) {
-        env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
+        env.installActionsRunnerController(t, vars.controllerImageRepo, vars.controllerImageTag, testID)
     })
 
     if t.Failed() {
@@ -209,6 +183,16 @@ func TestE2E(t *testing.T) {
         env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)
     })
 
+    t.Run("install argo-tunnel", func(t *testing.T) {
+        env.installArgoTunnel(t)
+    })
+
+    if !skipArgoTunnelCleanUp {
+        t.Cleanup(func() {
+            env.uninstallArgoTunnel(t)
+        })
+    }
+
     t.Run("deploy runners", func(t *testing.T) {
         env.deploy(t, RunnerDeployments, testID)
     })
@@ -220,7 +204,7 @@ func TestE2E(t *testing.T) {
     }
 
     t.Run("install edge actions-runner-controller", func(t *testing.T) {
-        env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
+        env.installActionsRunnerController(t, vars.controllerImageRepo, vars.controllerImageTag, testID)
     })
 
     if t.Failed() {
@@ -248,6 +232,8 @@ func TestE2E(t *testing.T) {
 type env struct {
     *testing.Env
 
+    Kind *testing.Kind
+
     // Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY
     // to let ARC authenticate as a GitHub App
     useApp bool
@@ -262,12 +248,98 @@ type env struct {
     scaleDownDelaySecondsAfterScaleOut int64
     minReplicas int64
     dockerdWithinRunnerContainer bool
+    remoteKubeconfig string
+    imagePullSecretName string
+
+    vars vars
+    VerifyTimeout time.Duration
 }
 
-func initTestEnv(t *testing.T) *env {
+type vars struct {
+    controllerImageRepo, controllerImageTag string
+
+    runnerImageRepo string
+    runnerDindImageRepo string
+
+    prebuildImages []testing.ContainerImage
+    builds []testing.DockerBuild
+
+    commonScriptEnv []string
+}
+
+func buildVars(repo string) vars {
+    if repo == "" {
+        repo = "actionsrunnercontrollere2e"
+    }
+
+    var (
+        controllerImageRepo = repo + "/actions-runner-controller"
+        controllerImageTag  = "e2e"
+        controllerImage     = testing.Img(controllerImageRepo, controllerImageTag)
+        runnerImageRepo     = repo + "/actions-runner"
+        runnerDindImageRepo = repo + "/actions-runner-dind"
+        runnerImageTag      = "e2e"
+        runnerImage         = testing.Img(runnerImageRepo, runnerImageTag)
+        runnerDindImage     = testing.Img(runnerDindImageRepo, runnerImageTag)
+    )
+
+    var vs vars
+
+    vs.controllerImageRepo, vs.controllerImageTag = controllerImageRepo, controllerImageTag
+    vs.runnerDindImageRepo = runnerDindImageRepo
+    vs.runnerImageRepo = runnerImageRepo
+
+    vs.prebuildImages = []testing.ContainerImage{
+        controllerImage,
+        runnerImage,
+        runnerDindImage,
+    }
+
+    vs.builds = []testing.DockerBuild{
+        {
+            Dockerfile:   "../../Dockerfile",
+            Args:         []testing.BuildArg{},
+            Image:        controllerImage,
+            EnableBuildX: true,
+        },
+        {
+            Dockerfile: "../../runner/actions-runner.dockerfile",
+            Args: []testing.BuildArg{
+                {
+                    Name:  "RUNNER_VERSION",
+                    Value: "2.294.0",
+                },
+            },
+            Image:        runnerImage,
+            EnableBuildX: true,
+        },
+        {
+            Dockerfile: "../../runner/actions-runner-dind.dockerfile",
+            Args: []testing.BuildArg{
+                {
+                    Name:  "RUNNER_VERSION",
+                    Value: "2.294.0",
+                },
+            },
+            Image:        runnerDindImage,
+            EnableBuildX: true,
+        },
+    }
+
+    vs.commonScriptEnv = []string{
+        "SYNC_PERIOD=" + "30s",
+        "RUNNER_TAG=" + runnerImageTag,
+    }
+
+    return vs
+}
+
+func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
     t.Helper()
 
-    testingEnv := testing.Start(t, testing.Preload(images...))
+    testingEnv := testing.Start(t, k8sMinorVer)
 
     e := &env{Env: testingEnv}
@@ -287,6 +359,23 @@ func initTestEnv(t *testing.T) *env {
     e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "")
     e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "")
     e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "")
+    e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "")
+    e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "")
+    e.vars = vars
+
+    if e.remoteKubeconfig == "" {
+        e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
+        e.Env.Kubeconfig = e.Kind.Kubeconfig()
+    } else {
+        e.Env.Kubeconfig = e.remoteKubeconfig
+
+        // Kind automatically installs https://github.com/rancher/local-path-provisioner for PVs.
+        // But assuming the remote cluster isn't a kind Kubernetes cluster,
+        // we need to install any provisioner manually.
+        // Here, we install the local-path-provisioner on the remote cluster too,
+        // so that we won't suffer from E2E failures due to the provisioner difference.
+        e.KubectlApply(t, "https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml", testing.KubectlConfig{})
+    }
+
     e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32)
     e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32)
@@ -306,8 +395,29 @@ func (e *env) f() {
 func (e *env) buildAndLoadImages(t *testing.T) {
     t.Helper()
 
-    e.DockerBuild(t, builds)
-    e.KindLoadImages(t, prebuildImages)
+    e.DockerBuild(t, e.vars.builds)
+
+    if e.remoteKubeconfig == "" {
+        e.KindLoadImages(t, e.vars.prebuildImages)
+    } else {
+        // If it fails with `no basic auth credentials` here, you might have missed logging into the container registry beforehand.
+        // For ECR, run something like:
+        //   aws ecr get-login-password | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com
+        // Also note that the authenticated session can expire within a day or so (probably depending on your AWS config),
+        // so you'd better script the docker login before running the E2E test.
+        e.DockerPush(t, e.vars.prebuildImages)
+    }
+}
+
+func (e *env) KindLoadImages(t *testing.T, prebuildImages []testing.ContainerImage) {
+    t.Helper()
+
+    ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
+    defer cancel()
+
+    if err := e.Kind.LoadImages(ctx, prebuildImages); err != nil {
+        t.Fatal(err)
+    }
 }
 
 func (e *env) installCertManager(t *testing.T) {
@@ -333,7 +443,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID str
     e.createControllerNamespaceAndServiceAccount(t)
 
     scriptEnv := []string{
-        "KUBECONFIG=" + e.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
         "ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
     }
 
@@ -342,6 +452,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID str
         "TEST_ID=" + testID,
         "NAME=" + repo,
         "VERSION=" + tag,
+        "IMAGE_PULL_SECRET=" + e.imagePullSecretName,
     }
 
     if e.useApp {
@@ -359,7 +470,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID str
     }
 
     scriptEnv = append(scriptEnv, varEnv...)
-    scriptEnv = append(scriptEnv, commonScriptEnv...)
+    scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...)
 
     e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
 }
@@ -380,7 +491,7 @@ func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
     e.createControllerNamespaceAndServiceAccount(t)
 
     scriptEnv := []string{
-        "KUBECONFIG=" + e.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
         "OP=" + op,
     }
 
@@ -409,21 +520,43 @@ func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
     if e.dockerdWithinRunnerContainer {
         varEnv = append(varEnv,
             "RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true",
-            "RUNNER_NAME="+runnerDindImageRepo,
+            "RUNNER_NAME="+e.vars.runnerDindImageRepo,
         )
     } else {
         varEnv = append(varEnv,
             "RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=false",
-            "RUNNER_NAME="+runnerImageRepo,
+            "RUNNER_NAME="+e.vars.runnerImageRepo,
         )
     }
 
     scriptEnv = append(scriptEnv, varEnv...)
-    scriptEnv = append(scriptEnv, commonScriptEnv...)
+    scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...)
 
     e.RunScript(t, "../../acceptance/deploy_runners.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
 }
+
+func (e *env) installArgoTunnel(t *testing.T) {
+    e.doArgoTunnel(t, "apply")
+}
+
+func (e *env) uninstallArgoTunnel(t *testing.T) {
+    e.doArgoTunnel(t, "delete")
+}
+
+func (e *env) doArgoTunnel(t *testing.T, op string) {
+    t.Helper()
+
+    scriptEnv := []string{
+        "KUBECONFIG=" + e.Kubeconfig,
+        "OP=" + op,
+        "TUNNEL_ID=" + os.Getenv("TUNNEL_ID"),
+        "TUNNEL_NAME=" + os.Getenv("TUNNEL_NAME"),
+        "TUNNEL_HOSTNAME=" + os.Getenv("TUNNEL_HOSTNAME"),
+    }
+
+    e.RunScript(t, "../../acceptance/argotunnel.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
+}
 
 func (e *env) runnerLabel(testID string) string {
     return "test-" + testID
 }
@@ -448,7 +581,15 @@ func (e *env) testJobs(testID string) []job {
 func (e *env) verifyActionsWorkflowRun(t *testing.T, testID string) {
     t.Helper()
 
-    verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID))
+    verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID), e.verifyTimeout())
+}
+
+func (e *env) verifyTimeout() time.Duration {
+    if e.VerifyTimeout > 0 {
+        return e.VerifyTimeout
+    }
+
+    return 8 * 60 * time.Second
 }
 
 type job struct {
@@ -625,7 +766,7 @@ kubectl create cm %s$id --from-literal=status=ok
     }
 }
 
-func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
+func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job, timeout time.Duration) {
     t.Helper()
 
     var expected []string
@@ -643,7 +784,7 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
         testResultCMName := testJobs[i].configMapName
 
         kubectlEnv := []string{
-            "KUBECONFIG=" + env.Kubeconfig(),
+            "KUBECONFIG=" + env.Kubeconfig,
         }
 
         cmCfg := testing.KubectlConfig{
@@ -675,5 +816,5 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
         }
 
         return results, err
-    }, 8*60*time.Second, 30*time.Second).Should(gomega.Equal(expected))
+    }, timeout, 30*time.Second).Should(gomega.Equal(expected))
 }
```

(Note: the added env entry originally read `"TUNNE_NAME=" + os.Getenv("TUNNEL_NAME")`, a typo; the script above consumes `${TUNNEL_NAME}`, so it is corrected to `TUNNEL_NAME` here.)
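Pulling the new knobs together, an invocation of the E2E suite might look like this (a sketch; the test package path and registry values are illustrative, while the env var names come from the diff above):

```bash
# Local run: build images, load them into a kind cluster running Kubernetes 1.23,
# and allow up to 15 minutes for workflow-run verification.
ARC_E2E_KUBE_VERSION=1.23 \
ARC_E2E_VERIFY_TIMEOUT=15m \
  go test -v -timeout 90m ./test/e2e/

# Remote-cluster run: skip kind entirely, push the prebuilt images to a private
# registry, and reference the pull secret that the cluster uses for that registry.
ARC_E2E_REMOTE_KUBECONFIG=$HOME/.kube/remote-cluster \
ARC_E2E_IMAGE_REPO=${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com \
ARC_E2E_IMAGE_PULL_SECRET_NAME=my-pull-secret \
  go test -v -timeout 90m ./test/e2e/
```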
```diff
@@ -71,3 +71,18 @@ func (k *Docker) dockerBuildCombinedOutput(ctx context.Context, build DockerBuil
 
     return k.CombinedOutput(cmd)
 }
+
+func (k *Docker) Push(ctx context.Context, images []ContainerImage) error {
+    for _, img := range images {
+        _, err := k.CombinedOutput(dockerPushCmd(ctx, img.Repo, img.Tag))
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func dockerPushCmd(ctx context.Context, repo, tag string) *exec.Cmd {
+    return exec.CommandContext(ctx, "docker", "push", repo+":"+tag)
+}
```
```diff
@@ -17,6 +17,12 @@ type T = testing.T
 
 var Short = testing.Short
 
+var images = map[string]string{
+    "1.22": "kindest/node:v1.22.9@sha256:8135260b959dfe320206eb36b3aeda9cffcb262f4b44cda6b33f7bb73f453105",
+    "1.23": "kindest/node:v1.23.6@sha256:b1fa224cc6c7ff32455e0b1fd9cbfd3d3bc87ecaa8fcb06961ed1afb3db0f9ae",
+    "1.24": "kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e",
+}
+
 func Img(repo, tag string) ContainerImage {
     return ContainerImage{
         Repo: repo,
@@ -28,21 +34,17 @@ func Img(repo, tag string) ContainerImage {
 // All of its methods are idempotent so that you can safely call it from within each subtest
 // and you can rerun the individual subtest until it works as you expect.
 type Env struct {
-    kind *Kind
+    Kubeconfig string
     docker *Docker
     Kubectl *Kubectl
     bash *Bash
 }
 
-func Start(t *testing.T, opts ...Option) *Env {
+func Start(t *testing.T, k8sMinorVer string) *Env {
     t.Helper()
 
-    k := StartKind(t, opts...)
-
     var env Env
 
-    env.kind = k
-
     d := &Docker{}
 
     env.docker = d
@@ -59,12 +61,12 @@ func Start(t *testing.T, opts ...Option) *Env {
 }
 
 func (e *Env) GetOrGenerateTestID(t *testing.T) string {
-    k, kctl := e.kind, e.Kubectl
+    kctl := e.Kubectl
 
     cmKey := "id"
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + k.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cmCfg := KubectlConfig{
@@ -89,10 +91,10 @@ func (e *Env) GetOrGenerateTestID(t *testing.T) string {
 }
 
 func (e *Env) DeleteTestID(t *testing.T) {
-    k, kctl := e.kind, e.Kubectl
+    kctl := e.Kubectl
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + k.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cmCfg := KubectlConfig{
@@ -119,13 +121,13 @@ func (e *Env) DockerBuild(t *testing.T, builds []DockerBuild) {
     }
 }
 
-func (e *Env) KindLoadImages(t *testing.T, prebuildImages []ContainerImage) {
+func (e *Env) DockerPush(t *testing.T, images []ContainerImage) {
     t.Helper()
 
     ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
     defer cancel()
 
-    if err := e.kind.LoadImages(ctx, prebuildImages); err != nil {
+    if err := e.docker.Push(ctx, images); err != nil {
         t.Fatal(err)
     }
 }
@@ -137,7 +139,7 @@ func (e *Env) KubectlApply(t *testing.T, path string, cfg KubectlConfig) {
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -154,7 +156,7 @@ func (e *Env) KubectlWaitUntilDeployAvailable(t *testing.T, name string, cfg Kub
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -171,7 +173,7 @@ func (e *Env) KubectlEnsureNS(t *testing.T, name string, cfg KubectlConfig) {
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -188,7 +190,7 @@ func (e *Env) KubectlEnsureClusterRoleBindingServiceAccount(t *testing.T, bindin
     defer cancel()
 
     kubectlEnv := []string{
-        "KUBECONFIG=" + e.kind.Kubeconfig(),
+        "KUBECONFIG=" + e.Kubeconfig,
     }
 
     cfg.Env = append(kubectlEnv, cfg.Env...)
@@ -200,10 +202,6 @@ func (e *Env) KubectlEnsureClusterRoleBindingServiceAccount(t *testing.T, bindin
     }
 }
 
-func (e *Env) Kubeconfig() string {
-    return e.kind.Kubeconfig()
-}
-
 func (e *Env) RunScript(t *testing.T, path string, cfg ScriptConfig) {
     t.Helper()
 
@@ -251,7 +249,7 @@ type ContainerImage struct {
     Repo, Tag string
 }
 
-func StartKind(t *testing.T, opts ...Option) *Kind {
+func StartKind(t *testing.T, k8sMinorVer string, opts ...Option) *Kind {
     t.Helper()
 
     invalidChars := []string{"/"}
@@ -266,7 +264,7 @@ func StartKind(t *testing.T, opts ...Option) *Kind {
     k.Dir = t.TempDir()
 
     kk := &k
-    if err := kk.Start(context.Background()); err != nil {
+    if err := kk.Start(context.Background(), k8sMinorVer); err != nil {
         t.Fatal(err)
     }
     t.Cleanup(func() {
@@ -323,7 +321,7 @@ func (k *Kind) Kubeconfig() string {
     return k.kubeconfig
 }
 
-func (k *Kind) Start(ctx context.Context) error {
+func (k *Kind) Start(ctx context.Context, k8sMinorVer string) error {
     getNodes, err := k.CombinedOutput(k.kindGetNodesCmd(ctx, k.Name))
     if err != nil {
         return err
@@ -337,6 +335,8 @@ func (k *Kind) Start(ctx context.Context) error {
         return err
     }
 
+    image := images[k8sMinorVer]
+
     kindConfig := []byte(fmt.Sprintf(`kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
 name: %s
@@ -344,8 +344,10 @@ networking:
   apiServerAddress: 0.0.0.0
 nodes:
 - role: control-plane
+  image: %s
 - role: worker
-`, k.Name))
+  image: %s
+`, k.Name, image, image))
 
     if err := os.WriteFile(f.Name(), kindConfig, 0644); err != nil {
         return err
```
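For reference, with `ARC_E2E_KUBE_VERSION=1.24` the kind config rendered by `Start` would come out roughly as follows (the cluster name is illustrative):

```yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: arc-e2e
networking:
  apiServerAddress: 0.0.0.0
nodes:
- role: control-plane
  image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
- role: worker
  image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
```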