Compare commits


3 Commits

Author               SHA1        Message                                              Date
Bassem Dghaidi       ad427c312a  Merge branch 'master' into runnerset-docker-socket   2023-09-21 21:13:45 +02:00
Dmitry Chepurovskiy  61d90f42d1  Add actual dind config to values template            2023-09-20 16:11:00 +03:00
Dmitry Chepurovskiy  ab0502ea93  Fix #2809: replace mTLS with unix socket             2023-08-23 12:04:37 -04:00
146 changed files with 13769 additions and 68530 deletions

View File

@@ -1,7 +1,7 @@
name: Bug Report (actions.summerwind.net API group)
description: File a bug report for actions.summerwind.net API group
name: Bug Report
description: File a bug report
title: "<Please write what didn't work for you here>"
labels: ["bug", "needs triage", "community"]
labels: ["bug", "needs triage"]
body:
- type: checkboxes
id: read-troubleshooting-guide
@@ -146,7 +146,7 @@ body:
render: shell
placeholder: |
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
To grab controller logs:
# Set NS according to your setup
@@ -166,7 +166,7 @@ body:
render: shell
placeholder: |
PROVIDE THE WHOLE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
To grab the runner pod logs:
# Set NS according to your setup. It should match your RunnerDeployment's metadata.namespace.
@@ -177,7 +177,7 @@ body:
kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log
If any of the containers are getting terminated immediately, try adding `--previous` to the `kubectl logs` commands above to obtain logs emitted before the termination.
validations:
required: true
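
The `--previous` tip above deserves a concrete illustration. A minimal sketch, reusing the `$NS` and `$POD_NAME` variables from the steps above:

# --previous returns the logs of the last terminated instance of a
# container, which is what you need when a container crash-loops
# before you can capture its output.
kubectl -n $NS logs $POD_NAME -c runner --previous > runnerpod_runner_previous.log
kubectl -n $NS logs $POD_NAME -c docker --previous > runnerpod_docker_previous.log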

View File

@@ -1,8 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: Feature requests for the gha-runner-scale-set (actions.github.com API group)
about: Feature requests associated with the actions.github.com group should be posted on the GitHub Community Support Forum
url: https://github.com/orgs/community/discussions/categories/actions
- name: Sponsor ARC Maintainers
about: If your business relies on the continued maintenance of actions-runner-controller, please consider sponsoring the project and the maintainers.
url: https://github.com/actions/actions-runner-controller/tree/master/CODEOWNERS

View File

@@ -1,7 +1,7 @@
---
name: Feature request (actions.summerwind.net API group)
name: Feature request
about: Suggest an idea for this project
labels: ["enhancement", "needs triage", "community"]
labels: ["enhancement", "needs triage"]
title: ''
assignees: ''
---

View File

@@ -1,113 +0,0 @@
name: Bug Report (actions.github.com API group)
description: File a bug report for actions.github.com API group
title: "<Please write what didn't work for you here>"
labels: ["bug", "needs triage", "gha-runner-scale-set"]
body:
- type: checkboxes
id: read-troubleshooting-guide
attributes:
label: Checks
description: Please check all the boxes below before submitting
options:
- label: I've already read https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/troubleshooting-actions-runner-controller-errors and I'm sure my issue is not covered in the troubleshooting guide.
required: true
- label: I am using charts that are officially provided
- type: input
id: controller-version
attributes:
label: Controller Version
description: Refers to semver-like release tags for controller versions. Any release tags prefixed with `gha-runner-scale-set-` are releases associated with this API group
placeholder: ex. 0.6.1
validations:
required: true
- type: dropdown
id: deployment-method
attributes:
label: Deployment Method
description: Which deployment method did you use to install ARC?
options:
- Helm
- Kustomize
- ArgoCD
- Other
validations:
required: true
- type: checkboxes
id: checks
attributes:
label: Checks
description: Please check all the boxes below before submitting
options:
- label: This isn't a question or user support case (For Q&A and community support, go to [Discussions](https://github.com/actions/actions-runner-controller/discussions)).
required: true
- label: I've read the [Changelog](https://github.com/actions/actions-runner-controller/blob/master/docs/gha-runner-scale-set-controller/README.md#changelog) before submitting this issue and I'm sure it's not due to any recently-introduced backward-incompatible changes
required: true
- type: textarea
id: reproduction-steps
attributes:
label: To Reproduce
description: "Steps to reproduce the behavior"
render: markdown
placeholder: |
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
validations:
required: true
- type: textarea
id: actual-behavior
attributes:
label: Describe the bug
description: Also tell us, what did happen?
placeholder: A clear and concise description of what happened.
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: Describe the expected behavior
description: Also tell us, what did you expect to happen?
placeholder: A clear and concise description of what the expected behavior is.
validations:
required: true
- type: textarea
id: additional-context
attributes:
label: Additional Context
render: yaml
description: |
Provide `values.yaml` files that are relevant for this issue. PLEASE REDACT ANY INFORMATION THAT SHOULD NOT BE PUBLICLY AVAILABLE, SUCH AS GITHUB TOKENS.
placeholder: |
PLEASE REDACT ANY INFORMATION THAT SHOULD NOT BE PUBLICLY AVAILABLE, SUCH AS GITHUB TOKENS.
validations:
required: true
- type: textarea
id: controller-logs
attributes:
label: Controller Logs
description: "NEVER EVER OMIT THIS! Include complete logs from `actions-runner-controller`'s controller-manager pod."
render: shell
placeholder: |
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
To grab controller logs:
kubectl logs -n $NAMESPACE deployments/$CONTROLLER_DEPLOYMENT
validations:
required: true
- type: textarea
id: runner-pod-logs
attributes:
label: Runner Pod Logs
description: "Include logs and kubectl describe output from runner pod(s)."
render: shell
placeholder: |
PROVIDE THE WHOLE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
validations:
required: true

View File

@@ -193,7 +193,7 @@ runs:
shell: bash
run: |
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
- name: Gather logs and cleanup
shell: bash
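
For context on the `kubectl wait --for=delete` call above: it blocks until every resource matching the selector is gone, and exits non-zero on timeout, which fails the step. A minimal sketch, with `$ARC_NAMESPACE` and `$ARC_NAME` standing in for the action inputs:

# Block until all AutoScalingRunnerSet resources from the release are
# deleted; a non-zero exit after the timeout fails the cleanup step.
kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet \
  -n "$ARC_NAMESPACE" -l app.kubernetes.io/instance="$ARC_NAME"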

View File

@@ -63,7 +63,7 @@ jobs:
python-version: '3.11'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
uses: helm/chart-testing-action@v2.3.1
- name: Run chart-testing (list-changed)
id: list-changed

View File

@@ -17,7 +17,7 @@ env:
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_WORKFLOW: release-runners.yaml
DOCKER_VERSION: 24.0.7
DOCKER_VERSION: 20.10.23
concurrency:
group: ${{ github.workflow }}

View File

@@ -78,7 +78,7 @@ jobs:
run: |
RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
PR_NAME="Updates:"
if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
then
@@ -88,7 +88,7 @@ jobs:
then
PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
fi
result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
if [ -z "$result" ]
then
@@ -120,25 +120,21 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
- name: Update files
run: |
CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"
LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
CURRENT_VERSION="${CONTAINER_HOOKS_CURRENT_VERSION//./\\.}"
LATEST_VERSION="${CONTAINER_HOOKS_LATEST_VERSION//./\\.}"
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go
- name: Commit changes
run: |
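
A note on the `${VAR//./\\.}` substitutions in the hunk above: the versions contain dots, which sed treats as "match any character", so the dots are escaped first to make the search pattern literal. A standalone sketch using the runner versions that appear in the Makefile diff further down:

# Unescaped, s/2.309.0/.../ would also match strings like "2x309y0".
RUNNER_CURRENT_VERSION=2.309.0
RUNNER_LATEST_VERSION=2.314.1
CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"   # 2\.309\.0
LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
echo "RUNNER_VERSION ?= 2.309.0" | sed "s/$CURRENT_VERSION/$LATEST_VERSION/g"
# prints: RUNNER_VERSION ?= 2.314.1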

View File

@@ -28,7 +28,7 @@ permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -69,10 +69,10 @@ jobs:
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
with:
python-version: '3.11'
python-version: '3.7'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
uses: helm/chart-testing-action@v2.4.0
- name: Run chart-testing (list-changed)
id: list-changed

View File

@@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.9.0"
IMAGE_VERSION: "0.6.0"
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
@@ -880,98 +880,3 @@ jobs:
helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
init-with-min-runners:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: arc-test-workflow.yaml
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
--set flags.updateStrategy="eventual" \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-rs-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set minRunners=5 \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Ensure 5 runners are up
run: |
count=0
while true; do
pod_count=$(kubectl get pods -n arc-runners --no-headers | wc -l)
if [[ "$pod_count" = 5 ]]; then
echo "5 pods are up!"
break
fi
if [[ "$count" -ge 12 ]]; then
echo "Timeout waiting for 5 pods to be created"
exit 1
fi
sleep 1
count=$((count+1))
done
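
The bespoke polling loop above exists because `kubectl wait` can watch conditions on named or labelled resources but cannot wait for a pod count. Under that constraint, a compact equivalent sketch using coreutils `timeout`:

# Poll until exactly 5 runner pods exist, or give up after 60 seconds.
if ! timeout 60 bash -c \
  'until [ "$(kubectl get pods -n arc-runners --no-headers | wc -l)" -eq 5 ]; do sleep 1; done'
then
  echo "Timeout waiting for 5 pods to be created" >&2
  exit 1
fi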

View File

@@ -24,7 +24,7 @@ permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -65,10 +65,10 @@ jobs:
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
with:
python-version: '3.11'
python-version: '3.7'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
uses: helm/chart-testing-action@v2.4.0
- name: Run chart-testing (list-changed)
id: list-changed

View File

@@ -2,7 +2,7 @@ name: Run CodeQL
on:
push:
branches:
- master
pull_request:
branches:
@@ -11,7 +11,7 @@ on:
- cron: '30 1 * * 0'
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -27,11 +27,6 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:

View File

@@ -19,7 +19,7 @@ permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -51,7 +51,7 @@ jobs:
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.55.2
version: v1.51.1
generate:
runs-on: ubuntu-latest

View File

@@ -1,2 +1,2 @@
# actions-runner-controller maintainers
* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass
* @mumoshu @toast-gear @actions/actions-runtime @nikola-jokic

View File

@@ -73,7 +73,7 @@ To make your development cycle faster, use the below command to update deploy an
# Makefile
VERSION=controller1 \
RUNNER_TAG=runner1 \
make acceptance/pull acceptance/kind docker-buildx acceptance/load acceptance/deploy
make acceptance/pull acceptance/kind docker-build acceptance/load acceptance/deploy
```
If you've already deployed actions-runner-controller and only want to recreate pods to use the newer image, you can run:
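
A minimal sketch of one way to do that, reusing the `$NAMESPACE` and `$CONTROLLER_DEPLOYMENT` placeholders from the issue template earlier (an assumption for illustration, not necessarily the project's documented command; the actual names depend on your install):

# Recreate the controller pods so they pick up the newly loaded image.
kubectl -n "$NAMESPACE" rollout restart deployment/"$CONTROLLER_DEPLOYMENT"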

View File

@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder
FROM --platform=$BUILDPLATFORM golang:1.20.7 as builder
WORKDIR /workspace
@@ -38,7 +38,6 @@ RUN --mount=target=. \
export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/manager main.go && \
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/ghalistener ./cmd/ghalistener && \
go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
go build -trimpath -ldflags="-s -w" -o /out/sleep ./cmd/sleep
@@ -53,7 +52,6 @@ COPY --from=builder /out/manager .
COPY --from=builder /out/github-webhook-server .
COPY --from=builder /out/actions-metrics-server .
COPY --from=builder /out/github-runnerscaleset-listener .
COPY --from=builder /out/ghalistener .
COPY --from=builder /out/sleep .
USER 65532:65532
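
Those `-ldflags "-X ..."` flags stamp version metadata into the binaries at link time by overwriting package-level string variables. A self-contained sketch of the mechanism (hypothetical `example.com/demo` module, not part of this repo):

cat > main.go <<'EOF'
package main

import "fmt"

// Version is overwritten at link time via -ldflags "-X 'main.Version=...'".
var Version = "dev"

func main() { fmt.Println(Version) }
EOF
go mod init example.com/demo
go build -trimpath -ldflags="-s -w -X 'main.Version=v1.2.3'" -o demo .
./demo   # prints: v1.2.3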

View File

@@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.314.1
RUNNER_VERSION ?= 2.309.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@@ -68,7 +68,7 @@ endif
all: manager
lint:
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.54.2 golangci-lint run
GO_TEST_ARGS ?= -short
@@ -320,7 +320,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$CONTROLLER_GEN_TMP_DIR ;\
go mod init tmp ;\
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0 ;\
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 ;\
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
}
endif
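
The temporary-module dance above (`go mod init tmp` followed by `go install`) predates Go 1.16; since then `go install` accepts an explicit version suffix, so the same pin can be a one-liner (a simplification sketch, not a change this diff makes):

go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0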

View File

@@ -42,10 +42,6 @@ type EphemeralRunner struct {
Status EphemeralRunnerStatus `json:"status,omitempty"`
}
func (er *EphemeralRunner) IsDone() bool {
return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
}
// EphemeralRunnerSpec defines the desired state of EphemeralRunner
type EphemeralRunnerSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster

View File

@@ -24,8 +24,6 @@ import (
type EphemeralRunnerSetSpec struct {
// Replicas is the number of desired EphemeralRunner resources in the k8s namespace.
Replicas int `json:"replicas,omitempty"`
// PatchID is the unique identifier for the patch issued by the listener app
PatchID int `json:"patchID"`
EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"`
}

View File

@@ -1,4 +1,5 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The actions-runner-controller authors.

View File

@@ -23,7 +23,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// log is for logging in this package.
@@ -49,20 +48,20 @@ func (r *Runner) Default() {
var _ webhook.Validator = &Runner{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *Runner) ValidateCreate() (admission.Warnings, error) {
func (r *Runner) ValidateCreate() error {
runnerLog.Info("validate resource to be created", "name", r.Name)
return nil, r.Validate()
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *Runner) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
func (r *Runner) ValidateUpdate(old runtime.Object) error {
runnerLog.Info("validate resource to be updated", "name", r.Name)
return nil, r.Validate()
return r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *Runner) ValidateDelete() (admission.Warnings, error) {
return nil, nil
func (r *Runner) ValidateDelete() error {
return nil
}
// Validate validates resource spec.

View File

@@ -23,7 +23,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// log is for logging in this package.
@@ -49,20 +48,20 @@ func (r *RunnerDeployment) Default() {
var _ webhook.Validator = &RunnerDeployment{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerDeployment) ValidateCreate() (admission.Warnings, error) {
func (r *RunnerDeployment) ValidateCreate() error {
runnerDeploymentLog.Info("validate resource to be created", "name", r.Name)
return nil, r.Validate()
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
func (r *RunnerDeployment) ValidateUpdate(old runtime.Object) error {
runnerDeploymentLog.Info("validate resource to be updated", "name", r.Name)
return nil, r.Validate()
return r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerDeployment) ValidateDelete() (admission.Warnings, error) {
return nil, nil
func (r *RunnerDeployment) ValidateDelete() error {
return nil
}
// Validate validates resource spec.

View File

@@ -23,7 +23,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// log is for logging in this package.
@@ -49,20 +48,20 @@ func (r *RunnerReplicaSet) Default() {
var _ webhook.Validator = &RunnerReplicaSet{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerReplicaSet) ValidateCreate() (admission.Warnings, error) {
func (r *RunnerReplicaSet) ValidateCreate() error {
runnerReplicaSetLog.Info("validate resource to be created", "name", r.Name)
return nil, r.Validate()
return r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerReplicaSet) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
func (r *RunnerReplicaSet) ValidateUpdate(old runtime.Object) error {
runnerReplicaSetLog.Info("validate resource to be updated", "name", r.Name)
return nil, r.Validate()
return r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerReplicaSet) ValidateDelete() (admission.Warnings, error) {
return nil, nil
func (r *RunnerReplicaSet) ValidateDelete() error {
return nil
}
// Validate validates resource spec.

View File

@@ -1,4 +1,5 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The actions-runner-controller authors.

View File

@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.23.7
version: 0.23.5
# Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.27.6
appVersion: 0.27.5
home: https://github.com/actions/actions-runner-controller

View File

@@ -8,156 +8,154 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
> _Default values are the defaults set in the charts `values.yaml`, some properties have default configurations in the code for when the property is omitted or invalid_
| Key | Description | Default |
|-----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|
| `labels` | Set labels to apply to all resources in the chart | |
| `replicaCount` | Set the number of controller pods | 1 |
| `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 1m |
| `enableLeaderElection` | Enable election configuration | true |
| `leaderElectionId` | Set the election ID for the controller group | |
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
| `githubURL` | Override GitHub URL to be used for GitHub API calls | |
| `githubUploadURL` | Override GitHub Upload URL to be used for GitHub API calls | |
| `runnerGithubURL` | Override GitHub URL to be used by runners during registration | |
| `logLevel` | Set the log level of the controller container | |
| `logFormat` | Set the log format of the controller. Valid options are "text" and "json" | text |
| `additionalVolumes` | Set additional volumes to add to the manager container | |
| `additionalVolumeMounts` | Set additional volume mounts to add to the manager container | |
| `authSecret.create` | Deploy the controller auth secret | false |
| `authSecret.name` | Set the name of the auth secret | controller-manager |
| `authSecret.annotations` | Set annotations for the auth Secret | |
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
| `authSecret.github_basicauth_username` | Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
| `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
| `hostNetwork` | The "hostNetwork" of the controller container | false |
| `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullnameOverride` | Override the full resource names | |
| `nameOverride` | Override the resource name prefix | |
| `serviceAccount.annotations` | Set annotations to the service account | |
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
| `podAnnotations` | Set annotations for the controller pod | |
| `podLabels` | Set labels for the controller pod | |
| `serviceAccount.name` | Set the name of the service account | |
| `securityContext` | Set the security context for each container in the controller pod | |
| `podSecurityContext` | Set the security context to controller pod | |
| `service.annotations` | Set annotations for the provisioned webhook service resource | |
| `service.port` | Set controller service ports | |
| `service.type` | Set controller service type | |
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
| `nodeSelector` | Set the controller pod nodeSelector | |
| `resources` | Set the controller pod resources | |
| `affinity` | Set the controller pod affinity rules | |
| `podDisruptionBudget.enabled` | Enables a PDB to ensure HA of controller pods | false |
| `podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
| `podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
| `tolerations` | Set the controller pod tolerations | |
| `env` | Set environment variables for the controller container | |
| `priorityClassName` | Set the controller pod priorityClassName | |
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
| `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account), this will enable reporting runner statuses | false |
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
| `githubWebhookServer.logFormat` | Set the log format of the githubWebhookServer controller. Valid options are "text" and "json" | text |
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility; you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
| `githubWebhookServer.queueLimit` | Set the queue size limit in the githubWebhookServer | |
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
| `githubWebhookServer.nameOverride` | Override the resource name prefix | |
| `githubWebhookServer.fullnameOverride` | Override the full resource names | |
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
| `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
| `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
| `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
| `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
| Key | Description | Default |
|----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|
| `labels` | Set labels to apply to all resources in the chart | |
| `replicaCount` | Set the number of controller pods | 1 |
| `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 1m |
| `enableLeaderElection` | Enable election configuration | true |
| `leaderElectionId` | Set the election ID for the controller group | |
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
| `githubURL` | Override GitHub URL to be used for GitHub API calls | |
| `githubUploadURL` | Override GitHub Upload URL to be used for GitHub API calls | |
| `runnerGithubURL` | Override GitHub URL to be used by runners during registration | |
| `logLevel` | Set the log level of the controller container | |
| `logFormat` | Set the log format of the controller. Valid options are "text" and "json" | text |
| `additionalVolumes` | Set additional volumes to add to the manager container | |
| `additionalVolumeMounts` | Set additional volume mounts to add to the manager container | |
| `authSecret.create` | Deploy the controller auth secret | false |
| `authSecret.name` | Set the name of the auth secret | controller-manager |
| `authSecret.annotations` | Set annotations for the auth Secret | |
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
| `authSecret.github_basicauth_username` | Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
| `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | |
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
| `hostNetwork` | The "hostNetwork" of the controller container | false |
| `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullnameOverride` | Override the full resource names | |
| `nameOverride` | Override the resource name prefix | |
| `serviceAccount.annotations` | Set annotations to the service account | |
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
| `podAnnotations` | Set annotations for the controller pod | |
| `podLabels` | Set labels for the controller pod | |
| `serviceAccount.name` | Set the name of the service account | |
| `securityContext` | Set the security context for each container in the controller pod | |
| `podSecurityContext` | Set the security context to controller pod | |
| `service.annotations` | Set annotations for the provisioned webhook service resource | |
| `service.port` | Set controller service ports | |
| `service.type` | Set controller service type | |
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
| `nodeSelector` | Set the controller pod nodeSelector | |
| `resources` | Set the controller pod resources | |
| `affinity` | Set the controller pod affinity rules | |
| `podDisruptionBudget.enabled` | Enables a PDB to ensure HA of controller pods | false |
| `podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
| `podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
| `tolerations` | Set the controller pod tolerations | |
| `env` | Set environment variables for the controller container | |
| `priorityClassName` | Set the controller pod priorityClassName | |
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
| `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account), this will enable reporting runner statuses | false |
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
| `githubWebhookServer.logFormat` | Set the log format of the githubWebhookServer controller. Valid options are "text" and "json" | text |
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility; you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
| `githubWebhookServer.queueLimit` | Set the queue size limit in the githubWebhookServer | |
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
| `githubWebhookServer.nameOverride` | Override the resource name prefix | |
| `githubWebhookServer.fullnameOverride` | Override the full resource names | |
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
| `githubWebhookServer.ingress.ingressClassName` | Set ingress class name | |
| `githubWebhookServer.podDisruptionBudget.enabled` | Enables a PDB to ensure HA of githubwebhook pods | false |
| `githubWebhookServer.podDisruptionBudget.minAvailable` | Minimum number of pods that must be available after eviction | |
| `githubWebhookServer.podDisruptionBudget.maxUnavailable` | Maximum number of pods that can be unavailable after eviction. Kubernetes 1.7+ required. | |
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
| `actionsMetricsServer.fullnameOverride` | Override the full resource names | |
| `actionsMetricsServer.serviceAccount.create` | Deploy the actionsMetricsServer under a service account | true |
| `actionsMetricsServer.serviceAccount.annotations` | Set annotations for the service account | |
| `actionsMetricsServer.serviceAccount.name` | Set the service account name | |
| `actionsMetricsServer.podAnnotations` | Set annotations for the actionsMetricsServer pod | |
| `actionsMetricsServer.podLabels` | Set labels for the actionsMetricsServer pod | |
| `actionsMetricsServer.podSecurityContext` | Set the security context to actionsMetricsServer pod | |
| `actionsMetricsServer.securityContext` | Set the security context for each container in the actionsMetricsServer pod | |
| `actionsMetricsServer.resources` | Set the actionsMetricsServer pod resources | |
| `actionsMetricsServer.topologySpreadConstraints` | Set the actionsMetricsServer pod topologySpreadConstraints | |
| `actionsMetricsServer.nodeSelector` | Set the actionsMetricsServer pod nodeSelector | |
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
| `actionsMetrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `actionsMetrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `actionsMetrics.serviceMonitor.namespace` | Namespace which Prometheus is running in. | `Release.Namespace` (the default namespace of the helm chart). |
| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
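
As a worked example of the table above, a minimal install sketch wiring a few of these values together (it assumes the chart repo has already been added under the `actions-runner-controller` alias; names and tokens are placeholders):

helm upgrade --install actions-runner-controller \
  actions-runner-controller/actions-runner-controller \
  --namespace actions-runner-system --create-namespace \
  --set authSecret.create=true \
  --set authSecret.github_token="<REDACTED>" \
  --set githubWebhookServer.enabled=true \
  --set githubWebhookServer.secret.enabled=true \
  --set githubWebhookServer.secret.create=true \
  --set githubWebhookServer.secret.github_webhook_secret_token="<REDACTED>" \
  --set logFormat=json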

View File

@@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -35,19 +36,10 @@ spec:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -56,9 +48,7 @@ spec:
properties:
capacityReservations:
items:
description: |-
CapacityReservation specifies the number of replicas temporarily added
to the scale target until ExpirationTime.
description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
@@ -90,46 +80,30 @@ spec:
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
@@ -137,9 +111,7 @@ spec:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
@@ -155,18 +127,7 @@ spec:
type: string
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook request received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
items:
properties:
amount:
@@ -179,18 +140,12 @@ spec:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of the patterns in the list can trigger autoscaling.
Note that the check_run name seems to equal the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of the patterns in the list can trigger autoscaling. Note that the check_run name seems to equal the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of the repositories in the list can trigger autoscaling.
description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of the repositories in the list can trigger autoscaling.
items:
type: string
type: array
@@ -215,9 +170,7 @@ spec:
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
@@ -226,33 +179,23 @@ spec:
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
@@ -260,9 +203,7 @@ spec:
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever.
format: date-time
type: string
type: object
@@ -291,24 +232,18 @@ spec:
type: object
type: array
desiredReplicas:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability.
type: string
type: object
type: object
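
Taken together, the field descriptions above sketch the HorizontalRunnerAutoscaler surface. A minimal manifest exercising them might look as follows; this is a sketch, the resource names are placeholders, and maxReplicas comes from a part of the spec elided by these hunks:

    apiVersion: actions.summerwind.dev/v1alpha1
    kind: HorizontalRunnerAutoscaler
    metadata:
      name: example-hra                        # placeholder
    spec:
      scaleTargetRef:
        name: example-runner-deployment        # placeholder RunnerDeployment to scale
      minReplicas: 1
      maxReplicas: 5
      scaleDownDelaySecondsAfterScaleOut: 300  # damps the down->up->down flapping noted above
      metrics:
      - type: PercentageRunnersBusy
        scaleUpThreshold: "0.75"               # scale up while more than 75% of runners are busy
        scaleDownThreshold: "0.25"
        scaleUpFactor: "2"
        scaleDownFactor: "0.5"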

View File

@@ -1,5 +1,4 @@
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -9,7 +8,7 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }}
namespace: {{ $servicemonitornamespace }}
namespace: {{ .Release.Namespace }}
spec:
endpoints:
- path: /metrics

View File

@@ -1,5 +1,4 @@
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -9,7 +8,7 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
namespace: {{ $servicemonitornamespace }}
namespace: {{ .Release.Namespace }}
spec:
endpoints:
- path: /metrics

View File

@@ -111,7 +111,6 @@ metrics:
serviceAnnotations: {}
serviceMonitor:
enable: false
namespace: ""
timeout: 30s
interval: 1m
serviceMonitorLabels: {}
@@ -313,7 +312,6 @@ actionsMetrics:
# to deploy the actions-metrics-server whose k8s service is referenced by the service monitor.
serviceMonitor:
enable: false
namespace: ""
timeout: 30s
interval: 1m
serviceMonitorLabels: {}
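
The $servicemonitornamespace lines in the ServiceMonitor templates above read this namespace value and fall back to the release namespace when it is empty. A sketch of values that place the ServiceMonitor elsewhere ("monitoring" is a placeholder namespace):

    actionsMetrics:
      serviceMonitor:
        enable: true
        namespace: "monitoring"  # empty (the default) keeps it in the release namespace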

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.0
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.0"
appVersion: "0.6.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -2,4 +2,4 @@ Thank you for installing {{ .Chart.Name }}.
Your release is named {{ .Release.Name }}.
WARNING: The older listener (githubrunnerscalesetlistener) is deprecated and will be removed in the future gha-runner-scale-set-0.10.0 release. If you are using an environment variable override to force the old listener, please remove the environment variable and use the new listener (ghalistener) instead.
WARNING: value specified under image.pullPolicy will be ignored and no longer be applied to the listener pod spec as of gha-runner-scale-set-0.7.0. Please use the listenerTemplate in the gha-runner-scale-set chart to control the image pull policy of the listener.

View File

@@ -48,7 +48,7 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/part-of: gha-rs-controller
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }}
{{ $k }}: {{ $v | quote }}
{{ $k }}: {{ $v }}
{{- end }}
{{- end }}
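
The `| quote` added here matters once a label value is not a plain scalar word. A sketch of values it guards, mirroring the team/teamMail entries the chart test below sets; costCenter is a hypothetical numeric-looking value:

    labels:
      github: actions
      team: "GitHub Team"
      teamMail: "team@github.com"
      costCenter: "12345"  # without `| quote`, YAML could re-type a value like this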

View File

@@ -27,9 +27,6 @@ spec:
app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: {{ .Chart.Version }}
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@@ -94,6 +91,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
{{- with .Values.env }}
{{- if kindIs "slice" . }}
{{- toYaml . | nindent 8 }}
@@ -110,16 +109,10 @@ spec:
volumeMounts:
- mountPath: /tmp
name: tmp
{{- range .Values.volumeMounts }}
- {{ toYaml . | nindent 10 }}
{{- end }}
terminationGracePeriodSeconds: 10
volumes:
- name: tmp
emptyDir: {}
{{- range .Values.volumes }}
- {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
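
The podLabels, volumes, and volumeMounts hooks wired into this deployment template are all driven from values. A sketch matching the inputs the chart test below uses (customMount and my-configmap come from that test; the pod label is hypothetical):

    podLabels:
      team: example  # hypothetical label applied to the pod only
    volumes:
    - name: customMount
      configMap:
        name: my-configmap
    volumeMounts:
    - name: customMount
      mountPath: /my/mount/path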

View File

@@ -368,12 +368,14 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
}
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
@@ -404,8 +406,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
SetValues: map[string]string{
"labels.foo": "bar",
"labels.github": "actions",
"labels.team": "GitHub Team",
"labels.teamMail": "team@github.com",
"replicaCount": "1",
"image.pullPolicy": "Always",
"image.tag": "dev",
@@ -424,14 +424,10 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"tolerations[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
"priorityClassName": "test-priority-class",
"flags.updateStrategy": "eventual",
"flags.logLevel": "info",
"flags.logFormat": "json",
"volumes[0].name": "customMount",
"volumes[0].configMap.name": "my-configmap",
"volumeMounts[0].name": "customMount",
"volumeMounts[0].mountPath": "/my/mount/path",
"priorityClassName": "test-priority-class",
"flags.updateStrategy": "eventual",
"flags.logLevel": "info",
"flags.logFormat": "json",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -451,8 +447,6 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "bar", deployment.Labels["foo"])
assert.Equal(t, "actions", deployment.Labels["github"])
assert.Equal(t, "GitHub Team", deployment.Labels["team"])
assert.Equal(t, "team@github.com", deployment.Labels["teamMail"])
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
@@ -465,8 +459,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value)
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
@@ -474,11 +468,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 2)
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Volumes[1].Name)
assert.Equal(t, "my-configmap", deployment.Spec.Template.Spec.Volumes[1].ConfigMap.Name)
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
@@ -513,25 +505,25 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.ElementsMatch(t, expectArgs, deployment.Spec.Template.Spec.Containers[0].Args)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value)
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Equal(t, "500m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 2)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
assert.Equal(t, "/my/mount/path", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
}
func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
@@ -770,12 +762,14 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
@@ -818,17 +812,17 @@ func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 6)
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Name)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Key)
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Optional)
assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[4].Value)
assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].ValueFrom)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
}
func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {

View File

@@ -41,8 +41,6 @@ serviceAccount:
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
@@ -72,10 +70,6 @@ tolerations: []
affinity: {}
# Mount volumes in the container.
volumes: []
volumeMounts: []
# Leverage a PriorityClass to ensure your pods survive resource shortages
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# PriorityClass: system-cluster-critical
@@ -106,7 +100,7 @@ flags:
## Defines how the controller should handle upgrades while having running jobs.
##
## The strategies available are:
## The srategies available are:
## - "immediate": (default) The controller will immediately apply the change causing the
## recreation of the listener and ephemeral runner set. This can lead to an
## overprovisioning of runners, if there are pending / running jobs. This should not
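
A sketch of opting into the non-default update strategy alongside the nearby flags, matching what the chart test above exercises:

    flags:
      logLevel: info
      logFormat: json
      updateStrategy: eventual  # default is "immediate"; see the trade-off described above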

View File

@@ -15,18 +15,18 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.0
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.0"
appVersion: "0.6.0"
home: https://github.com/actions/actions-runner-controller
home: https://github.com/actions/dev-arc
sources:
- "https://github.com/actions/actions-runner-controller"
- "https://github.com/actions/dev-arc"
maintainers:
- name: actions

View File

@@ -10,10 +10,6 @@ gha-rs
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "gha-runner-scale-set.scale-set-name" -}}
{{ .Values.runnerScaleSetName | default .Release.Name }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
@@ -21,7 +17,7 @@ If release name contains chart name it will be used as a full name.
*/}}
{{- define "gha-runner-scale-set.fullname" -}}
{{- $name := default (include "gha-base-name" .) }}
{{- printf "%s-%s" (include "gha-runner-scale-set.scale-set-name" .) $name | trunc 63 | trimSuffix "-" }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
@@ -42,7 +38,7 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: gha-rs
actions.github.com/scale-set-name: {{ include "gha-runner-scale-set.scale-set-name" . }}
actions.github.com/scale-set-name: {{ .Release.Name }}
actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
{{- end }}
@@ -50,8 +46,8 @@ actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
Selector labels
*/}}
{{- define "gha-runner-scale-set.selectorLabels" -}}
app.kubernetes.io/name: {{ include "gha-runner-scale-set.scale-set-name" . }}
app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }}
app.kubernetes.io/name: {{ include "gha-runner-scale-set.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "gha-runner-scale-set.githubsecret" -}}
@@ -99,7 +95,7 @@ volumeMounts:
image: docker:dind
args:
- dockerd
- --host=unix:///var/run/docker.sock
- --host=unix:///run/docker/docker.sock
- --group=$(DOCKER_GROUP_GID)
env:
- name: DOCKER_GROUP_GID
@@ -110,7 +106,7 @@ volumeMounts:
- name: work
mountPath: /home/runner/_work
- name: dind-sock
mountPath: /var/run
mountPath: /run/docker
- name: dind-externals
mountPath: /home/runner/externals
{{- end }}
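
These hunks move the Docker socket from /var/run to /run/docker. Condensed from the dind definition above and the runner env/mount hunks below, the wiring on the /run/docker side is: dockerd listens on the socket, both containers share the dind-sock volume, and the runner's DOCKER_HOST points at it (mounted read-only on the runner side). A sketch, not a complete pod spec:

    containers:
    - name: runner
      env:
      - name: DOCKER_HOST
        value: unix:///run/docker/docker.sock
      volumeMounts:
      - name: dind-sock
        mountPath: /run/docker
        readOnly: true
    - name: dind
      args:
      - dockerd
      - --host=unix:///run/docker/docker.sock
      - --group=$(DOCKER_GROUP_GID)
      volumeMounts:
      - name: dind-sock
        mountPath: /run/docker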
@@ -223,7 +219,7 @@ env:
{{- end }}
{{- if $setDockerHost }}
- name: DOCKER_HOST
value: unix:///var/run/docker.sock
value: unix:///run/docker/docker.sock
{{- end }}
{{- if $setRunnerWaitDocker }}
- name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
@@ -264,7 +260,8 @@ volumeMounts:
{{- end }}
{{- if $mountDindCert }}
- name: dind-sock
mountPath: /var/run
mountPath: /run/docker
readOnly: true
{{- end }}
{{- if $mountGitHubServerTLS }}
- name: github-server-tls-cert
@@ -384,9 +381,6 @@ volumeMounts:
{{- $setNodeExtraCaCerts = 1 }}
{{- $setRunnerUpdateCaCerts = 1 }}
{{- end }}
{{- $mountGitHubServerTLS := 0 }}
{{- if or $container.env $setNodeExtraCaCerts $setRunnerUpdateCaCerts }}
env:
{{- with $container.env }}
{{- range $i, $env := . }}
@@ -407,12 +401,10 @@ volumeMounts:
- name: RUNNER_UPDATE_CA_CERTS
value: "1"
{{- end }}
{{- $mountGitHubServerTLS := 0 }}
{{- if $tlsConfig.runnerMountPath }}
{{- $mountGitHubServerTLS = 1 }}
{{- end }}
{{- end }}
{{- if or $container.volumeMounts $mountGitHubServerTLS }}
volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
@@ -427,7 +419,6 @@ volumeMounts:
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
{{- end }}
{{- end}}
{{- end }}
{{- end }}
{{- end }}
@@ -525,13 +516,13 @@ volumeMounts:
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
@@ -544,11 +535,11 @@ volumeMounts:
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountNamespace "" }}
{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountNamespace }}
{{- end }}
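
On the side that defines gha-runner-scale-set.scale-set-name, runnerScaleSetName (defaulting to the Helm release name) drives both the resource name and the scale-set labels, while the fail messages above suggest pinning the controller service account when discovery is ambiguous. A sketch of both knobs, using the values the chart tests below exercise:

    runnerScaleSetName: test-runner-scale-set-name  # optional; defaults to the release name
    controllerServiceAccount:
      name: arc
      namespace: arc-system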

View File

@@ -1,19 +1,18 @@
apiVersion: actions.github.com/v1alpha1
kind: AutoscalingRunnerSet
metadata:
{{- if or (not (include "gha-runner-scale-set.scale-set-name" .)) (gt (len (include "gha-runner-scale-set.scale-set-name" .)) 45) }}
{{- if or (not .Release.Name) (gt (len .Release.Name) 45) }}
{{ fail "Name must have up to 45 characters" }}
{{- end }}
{{- if gt (len .Release.Namespace) 63 }}
{{ fail "Namespace must have up to 63 characters" }}
{{- end }}
name: {{ include "gha-runner-scale-set.scale-set-name" . }}
name: {{ .Values.runnerScaleSetName | default .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/component: "autoscaling-runner-set"
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
{{- $containerMode := .Values.containerMode }}
{{- if not (kindIs "string" .Values.githubConfigSecret) }}
actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}

View File

@@ -5,12 +5,6 @@ kind: ServiceAccount
metadata:
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- if .Values.containerMode.kubernetesModeServiceAccount }}
{{- with .Values.containerMode.kubernetesModeServiceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
finalizers:
- actions.github.com/cleanup-protection
labels:
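
The annotations block that differs here is driven by containerMode.kubernetesModeServiceAccount. The test values file removed further down shows the intended shape:

    containerMode:
      type: kubernetes
      kubernetesModeServiceAccount:
        annotations:
          eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/sample-role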

View File

@@ -330,7 +330,7 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"])
@@ -361,7 +361,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
require.NoError(t, err)
releaseName := "test-runners"
nameOverride := "test-runner-scale-set-name"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
@@ -369,7 +368,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"runnerScaleSetName": nameOverride,
"runnerScaleSetName": "test-runner-scale-set-name",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
@@ -382,15 +381,12 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, nameOverride, ars.Name)
assert.Equal(t, "test-runner-scale-set-name", ars.Name)
assert.Equal(t, nameOverride, ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, nameOverride, ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, nameOverride, ars.Labels["actions.github.com/scale-set-name"])
assert.Equal(t, namespaceName, ars.Labels["actions.github.com/scale-set-namespace"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, releaseName, ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, nameOverride+"-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
assert.Equal(t, "test-runner-scale-set-name", ars.Spec.RunnerScaleSetName)
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
@@ -742,37 +738,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
}
func TestTemplateRenderedKubernetesModeServiceAccountAnnotations(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_kubernetes_mode_service_account_annotations.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
var sa corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &sa)
assert.Equal(t, "arn:aws:iam::123456789012:role/sample-role", sa.Annotations["eks.amazonaws.com/role-arn"], "Annotations should be arn:aws:iam::123456789012:role/sample-role")
}
func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
t.Parallel()
@@ -875,7 +840,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
@@ -900,7 +865,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "unix:///var/run/docker.sock", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "unix:///run/docker/docker.sock", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "120", ars.Spec.Template.Spec.Containers[0].Env[1].Value)
@@ -910,7 +875,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.False(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].ReadOnly)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
assert.Equal(t, "/run/docker", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
assert.True(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].ReadOnly)
assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
@@ -920,7 +886,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
assert.Equal(t, "/run/docker", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)
@@ -962,7 +928,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
@@ -1063,7 +1029,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "pre-defined-secrets", ars.Spec.GitHubConfigSecret)
@@ -2016,130 +1982,3 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
}
}
func TestRunnerContainerEnvNotEmptyMap(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
type testModel struct {
Spec struct {
Template struct {
Spec struct {
Containers []map[string]any `yaml:"containers"`
} `yaml:"spec"`
} `yaml:"template"`
} `yaml:"spec"`
}
var m testModel
helm.UnmarshalK8SYaml(t, output, &m)
_, ok := m.Spec.Template.Spec.Containers[0]["env"]
assert.False(t, ok, "env should not be set")
}
func TestRunnerContainerVolumeNotEmptyMap(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
type testModel struct {
Spec struct {
Template struct {
Spec struct {
Containers []map[string]any `yaml:"containers"`
} `yaml:"spec"`
} `yaml:"template"`
} `yaml:"spec"`
}
var m testModel
helm.UnmarshalK8SYaml(t, output, &m)
_, ok := m.Spec.Template.Spec.Containers[0]["volumeMounts"]
assert.False(t, ok, "volumeMounts should not be set")
}
func TestAutoscalingRunnerSetAnnotationValuesHash(t *testing.T) {
t.Parallel()
const valuesHash = "actions.github.com/values-hash"
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
firstHash := autoscalingRunnerSet.Annotations["actions.github.com/values-hash"]
assert.NotEmpty(t, firstHash)
assert.LessOrEqual(t, len(firstHash), 63)
helmChartPath, err = filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
options = &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token1234567890",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
secondHash := autoscalingRunnerSet.Annotations[valuesHash]
assert.NotEmpty(t, secondHash)
assert.NotEqual(t, firstHash, secondHash)
assert.LessOrEqual(t, len(secondHash), 63)
}

View File

@@ -28,4 +28,4 @@ template:
path: /data
type: Directory
containerMode:
type: kubernetes
type: kubernetes

View File

@@ -1,8 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
containerMode:
type: kubernetes
kubernetesModeServiceAccount:
annotations:
eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/sample-role

View File

@@ -39,8 +39,7 @@ githubConfigSecret:
## maxRunners is the max number of runners the autoscaling runner set will scale up to.
# maxRunners: 5
## minRunners is the min number of idle runners. The target number of runners created will be
## calculated as a sum of minRunners and the number of jobs assigned to the scale set.
## minRunners is the min number of runners the autoscaling runner set will scale down to.
# minRunners: 0
# runnerGroup: "default"
@@ -85,8 +84,6 @@ githubConfigSecret:
# resources:
# requests:
# storage: 1Gi
# kubernetesModeServiceAccount:
# annotations:
## template is the PodSpec for each listener Pod
## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
@@ -125,17 +122,18 @@ template:
## command: ["/home/runner/run.sh"]
## env:
## - name: DOCKER_HOST
## value: unix:///var/run/docker.sock
## value: unix:///run/docker/docker.sock
## volumeMounts:
## - name: work
## mountPath: /home/runner/_work
## - name: dind-sock
## mountPath: /var/run
## mountPath: /run/docker
## readOnly: true
## - name: dind
## image: docker:dind
## args:
## - dockerd
## - --host=unix:///var/run/docker.sock
## - --host=unix:///run/docker/docker.sock
## - --group=$(DOCKER_GROUP_GID)
## env:
## - name: DOCKER_GROUP_GID
@@ -146,7 +144,7 @@ template:
## - name: work
## mountPath: /home/runner/_work
## - name: dind-sock
## mountPath: /var/run
## mountPath: /run/docker
## - name: dind-externals
## mountPath: /home/runner/externals
## volumes:

View File

@@ -1,133 +0,0 @@
package app
import (
"context"
"errors"
"fmt"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"golang.org/x/sync/errgroup"
)
// App is responsible for initializing required components and running the app.
type App struct {
// configured fields
config config.Config
logger logr.Logger
// initialized fields
listener Listener
worker Worker
metrics metrics.ServerPublisher
}
//go:generate mockery --name Listener --output ./mocks --outpkg mocks --case underscore
type Listener interface {
Listen(ctx context.Context, handler listener.Handler) error
}
//go:generate mockery --name Worker --output ./mocks --outpkg mocks --case underscore
type Worker interface {
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error)
}
func New(config config.Config) (*App, error) {
app := &App{
config: config,
}
ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse GitHub config from URL: %w", err)
}
{
logger, err := config.Logger()
if err != nil {
return nil, fmt.Errorf("failed to create logger: %w", err)
}
app.logger = logger.WithName("listener-app")
}
actionsClient, err := config.ActionsClient(app.logger)
if err != nil {
return nil, fmt.Errorf("failed to create actions client: %w", err)
}
if config.MetricsAddr != "" {
app.metrics = metrics.NewExporter(metrics.ExporterConfig{
ScaleSetName: config.EphemeralRunnerSetName,
ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
Enterprise: ghConfig.Enterprise,
Organization: ghConfig.Organization,
Repository: ghConfig.Repository,
ServerAddr: config.MetricsAddr,
ServerEndpoint: config.MetricsEndpoint,
})
}
worker, err := worker.New(
worker.Config{
EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
EphemeralRunnerSetName: config.EphemeralRunnerSetName,
MaxRunners: config.MaxRunners,
MinRunners: config.MinRunners,
},
worker.WithLogger(app.logger.WithName("worker")),
)
if err != nil {
return nil, fmt.Errorf("failed to create new kubernetes worker: %w", err)
}
app.worker = worker
listener, err := listener.New(listener.Config{
Client: actionsClient,
ScaleSetID: app.config.RunnerScaleSetId,
MinRunners: app.config.MinRunners,
MaxRunners: app.config.MaxRunners,
Logger: app.logger.WithName("listener"),
Metrics: app.metrics,
})
if err != nil {
return nil, fmt.Errorf("failed to create new listener: %w", err)
}
app.listener = listener
app.logger.Info("app initialized")
return app, nil
}
func (app *App) Run(ctx context.Context) error {
var errs []error
if app.worker == nil {
errs = append(errs, fmt.Errorf("worker not initialized"))
}
if app.listener == nil {
errs = append(errs, fmt.Errorf("listener not initialized"))
}
if err := errors.Join(errs...); err != nil {
return fmt.Errorf("app not initialized: %w", err)
}
g, ctx := errgroup.WithContext(ctx)
g.Go(func() error {
app.logger.Info("Starting listener")
return app.listener.Listen(ctx, app.worker)
})
if app.metrics != nil {
g.Go(func() error {
app.logger.Info("Starting metrics server")
return app.metrics.ListenAndServe(ctx)
})
}
return g.Wait()
}

View File

@@ -1,85 +0,0 @@
package app
import (
"context"
"errors"
"testing"
appmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/app/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
metricsMocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
func TestApp_Run(t *testing.T) {
t.Parallel()
t.Run("ListenerWorkerGuard", func(t *testing.T) {
invalidApps := []*App{
{},
{worker: &worker.Worker{}},
{listener: &listener.Listener{}},
}
for _, app := range invalidApps {
assert.Error(t, app.Run(context.Background()))
}
})
t.Run("ExitsOnListenerError", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
listener.On("Listen", mock.Anything, mock.Anything).Return(errors.New("listener error")).Once()
app := &App{
listener: listener,
worker: worker,
}
err := app.Run(context.Background())
assert.Error(t, err)
})
t.Run("ExitsOnListenerNil", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
listener.On("Listen", mock.Anything, mock.Anything).Return(nil).Once()
app := &App{
listener: listener,
worker: worker,
}
err := app.Run(context.Background())
assert.NoError(t, err)
})
t.Run("CancelListenerOnMetricsServerError", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
metrics := metricsMocks.NewServerPublisher(t)
ctx := context.Background()
listener.On("Listen", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
ctx := args.Get(0).(context.Context)
go func() {
<-ctx.Done()
}()
}).Return(nil).Once()
metrics.On("ListenAndServe", mock.Anything).Return(errors.New("metrics server error")).Once()
app := &App{
listener: listener,
worker: worker,
metrics: metrics,
}
err := app.Run(ctx)
assert.Error(t, err)
})
}

View File

@@ -1,43 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
mock "github.com/stretchr/testify/mock"
)
// Listener is an autogenerated mock type for the Listener type
type Listener struct {
mock.Mock
}
// Listen provides a mock function with given fields: ctx, handler
func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
ret := _m.Called(ctx, handler)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, listener.Handler) error); ok {
r0 = rf(ctx, handler)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewListener creates a new instance of Listener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewListener(t interface {
mock.TestingT
Cleanup(func())
}) *Listener {
mock := &Listener{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,68 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
actions "github.com/actions/actions-runner-controller/github/actions"
context "context"
mock "github.com/stretchr/testify/mock"
)
// Worker is an autogenerated mock type for the Worker type
type Worker struct {
mock.Mock
}
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
ret := _m.Called(ctx, count, acquireCount)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
return rf(ctx, count, acquireCount)
}
if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
r0 = rf(ctx, count, acquireCount)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
r1 = rf(ctx, count, acquireCount)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ret := _m.Called(ctx, jobInfo)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
r0 = rf(ctx, jobInfo)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewWorker creates a new instance of Worker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewWorker(t interface {
mock.TestingT
Cleanup(func())
}) *Worker {
mock := &Worker{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,161 +0,0 @@
package config
import (
"crypto/x509"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/go-logr/logr"
"golang.org/x/net/http/httpproxy"
)
type Config struct {
ConfigureUrl string `json:"configureUrl"`
AppID int64 `json:"appID"`
AppInstallationID int64 `json:"appInstallationID"`
AppPrivateKey string `json:"appPrivateKey"`
Token string `json:"token"`
EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName"`
MaxRunners int `json:"maxRunners"`
MinRunners int `json:"minRunners"`
RunnerScaleSetId int `json:"runnerScaleSetId"`
RunnerScaleSetName string `json:"runnerScaleSetName"`
ServerRootCA string `json:"serverRootCA"`
LogLevel string `json:"logLevel"`
LogFormat string `json:"logFormat"`
MetricsAddr string `json:"metricsAddr"`
MetricsEndpoint string `json:"metricsEndpoint"`
}
func Read(path string) (Config, error) {
f, err := os.Open(path)
if err != nil {
return Config{}, err
}
defer f.Close()
var config Config
if err := json.NewDecoder(f).Decode(&config); err != nil {
return Config{}, fmt.Errorf("failed to decode config: %w", err)
}
if err := config.validate(); err != nil {
return Config{}, fmt.Errorf("failed to validate config: %w", err)
}
return config, nil
}
func (c *Config) validate() error {
if len(c.ConfigureUrl) == 0 {
return fmt.Errorf("GitHubConfigUrl is not provided")
}
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
}
if c.RunnerScaleSetId == 0 {
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
}
if c.MaxRunners < c.MinRunners {
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
}
hasToken := len(c.Token) > 0
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""
if !hasToken && !hasPrivateKeyConfig {
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
if hasToken && hasPrivateKeyConfig {
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
return nil
}
func (c *Config) Logger() (logr.Logger, error) {
logLevel := string(logging.LogLevelDebug)
if c.LogLevel != "" {
logLevel = c.LogLevel
}
logFormat := string(logging.LogFormatText)
if c.LogFormat != "" {
logFormat = c.LogFormat
}
logger, err := logging.NewLogger(logLevel, logFormat)
if err != nil {
return logr.Logger{}, fmt.Errorf("NewLogger failed: %w", err)
}
return logger, nil
}
func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.ClientOption) (*actions.Client, error) {
var creds actions.ActionsAuth
switch c.Token {
case "":
creds.AppCreds = &actions.GitHubAppAuth{
AppID: c.AppID,
AppInstallationID: c.AppInstallationID,
AppPrivateKey: c.AppPrivateKey,
}
default:
creds.Token = c.Token
}
options := append([]actions.ClientOption{
actions.WithLogger(logger),
}, clientOptions...)
if c.ServerRootCA != "" {
systemPool, err := x509.SystemCertPool()
if err != nil {
return nil, fmt.Errorf("failed to load system cert pool: %w", err)
}
pool := systemPool.Clone()
ok := pool.AppendCertsFromPEM([]byte(c.ServerRootCA))
if !ok {
return nil, fmt.Errorf("failed to parse root certificate")
}
options = append(options, actions.WithRootCAs(pool))
}
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) {
return proxyFunc(req.URL)
}))
client, err := actions.NewClient(c.ConfigureUrl, &creds, options...)
if err != nil {
return nil, fmt.Errorf("failed to create actions client: %w", err)
}
client.SetUserAgent(actions.UserAgentInfo{
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: c.RunnerScaleSetId,
HasProxy: hasProxy(),
Subsystem: "ghalistener",
})
return client, nil
}
func hasProxy() bool {
	// Note: httpproxy's ProxyFunc() never returns nil, so checking the returned
	// function would always report a proxy. Check the environment values instead.
	env := httpproxy.FromEnvironment()
	return env.HTTPProxy != "" || env.HTTPSProxy != ""
}


@@ -1,161 +0,0 @@
package config_test
import (
"context"
"crypto/tls"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/testserver"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCustomerServerRootCA(t *testing.T) {
ctx := context.Background()
certsFolder := filepath.Join(
"../../../",
"github",
"actions",
"testdata",
)
certPath := filepath.Join(certsFolder, "server.crt")
keyPath := filepath.Join(certsFolder, "server.key")
serverCalledSuccessfully := false
server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
serverCalledSuccessfully = true
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"count": 0}`))
}))
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
require.NoError(t, err)
server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
server.StartTLS()
var certsString string
rootCA, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt"))
require.NoError(t, err)
certsString = string(rootCA)
intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.pem"))
require.NoError(t, err)
certsString = certsString + string(intermediate)
config := config.Config{
ConfigureUrl: server.ConfigURLForOrg("myorg"),
ServerRootCA: certsString,
Token: "token",
}
client, err := config.ActionsClient(logr.Discard())
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, "test")
require.NoError(t, err)
assert.True(t, serverCalledSuccessfully)
}
func TestProxySettings(t *testing.T) {
t.Run("http", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
defer os.Setenv("http_proxy", prevProxy)
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
}
client, err := config.ActionsClient(logr.Discard())
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.True(t, wentThroughProxy)
})
t.Run("https", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("https_proxy")
os.Setenv("https_proxy", proxy.URL)
defer os.Setenv("https_proxy", prevProxy)
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
}
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
// proxy doesn't support https
assert.Error(t, err)
assert.True(t, wentThroughProxy)
})
t.Run("no_proxy", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
defer os.Setenv("http_proxy", prevProxy)
prevNoProxy := os.Getenv("no_proxy")
os.Setenv("no_proxy", "example.com")
defer os.Setenv("no_proxy", prevNoProxy)
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
}
client, err := config.ActionsClient(logr.Discard())
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.False(t, wentThroughProxy)
})
}


@@ -1,92 +0,0 @@
package config
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
Token: "token",
}
err := config.validate()
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
Token: "asdf",
}
err := config.validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}


@@ -1,430 +0,0 @@
package listener
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"time"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/google/uuid"
)
const (
sessionCreationMaxRetries = 10
)
// message types
const (
messageTypeJobAvailable = "JobAvailable"
messageTypeJobAssigned = "JobAssigned"
messageTypeJobStarted = "JobStarted"
messageTypeJobCompleted = "JobCompleted"
)
//go:generate mockery --name Client --output ./mocks --outpkg mocks --case underscore
type Client interface {
GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error)
CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error)
GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error)
DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error)
DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error
}
type Config struct {
Client Client
ScaleSetID int
MinRunners int
MaxRunners int
Logger logr.Logger
Metrics metrics.Publisher
}
func (c *Config) Validate() error {
if c.Client == nil {
return errors.New("client is required")
}
if c.ScaleSetID == 0 {
return errors.New("scaleSetID is required")
}
if c.MinRunners < 0 {
return errors.New("minRunners must be greater than or equal to 0")
}
if c.MaxRunners < 0 {
return errors.New("maxRunners must be greater than or equal to 0")
}
if c.MaxRunners > 0 && c.MinRunners > c.MaxRunners {
return errors.New("minRunners must be less than or equal to maxRunners")
}
return nil
}
// The Listener's role is to manage all interactions with the actions service.
// It receives messages and processes them using the given handler.
type Listener struct {
// configured fields
scaleSetID int // The ID of the scale set associated with the listener.
client Client // The client used to interact with the scale set.
metrics metrics.Publisher // The publisher used to publish metrics.
// internal fields
logger logr.Logger // The logger used for logging.
hostname string // The hostname of the listener.
// updated fields
lastMessageID int64 // The ID of the last processed message.
session *actions.RunnerScaleSetSession // The session for managing the runner scale set.
}
func New(config Config) (*Listener, error) {
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("invalid config: %w", err)
}
listener := &Listener{
scaleSetID: config.ScaleSetID,
client: config.Client,
logger: config.Logger,
metrics: metrics.Discard,
}
if config.Metrics != nil {
listener.metrics = config.Metrics
}
listener.metrics.PublishStatic(config.MinRunners, config.MaxRunners)
hostname, err := os.Hostname()
if err != nil {
hostname = uuid.NewString()
listener.logger.Info("Failed to get hostname, fallback to uuid", "uuid", hostname, "error", err)
}
listener.hostname = hostname
return listener, nil
}
//go:generate mockery --name Handler --output ./mocks --outpkg mocks --case underscore
type Handler interface {
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error)
}
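To make the contract concrete, here is a minimal sketch of a Handler implementation (noopHandler is hypothetical; in this change the real implementation is the worker that scales the ephemeral runner set):

// noopHandler is a hypothetical Handler that only logs what it is asked to do.
type noopHandler struct{ logger logr.Logger }

func (h *noopHandler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
	h.logger.Info("job started", "requestId", jobInfo.RunnerRequestId)
	return nil
}

func (h *noopHandler) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
	h.logger.Info("desired runner count", "count", count, "jobsCompleted", jobsCompleted)
	return count, nil // pretend the desired count was applied as-is
}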
// Listen listens for incoming messages and handles them using the provided handler.
// It continuously listens for messages until the context is cancelled.
// The initial message contains the current statistics and acquirable jobs, if any.
// The handler is responsible for handling the initial message and subsequent messages.
// If an error occurs during any step, Listen returns an error.
func (l *Listener) Listen(ctx context.Context, handler Handler) error {
if err := l.createSession(ctx); err != nil {
return fmt.Errorf("createSession failed: %w", err)
}
defer func() {
if err := l.deleteMessageSession(); err != nil {
l.logger.Error(err, "failed to delete message session")
}
}()
initialMessage := &actions.RunnerScaleSetMessage{
MessageId: 0,
MessageType: "RunnerScaleSetJobMessages",
Statistics: l.session.Statistics,
Body: "",
}
if l.session.Statistics == nil {
return fmt.Errorf("session statistics is nil")
}
l.metrics.PublishStatistics(initialMessage.Statistics)
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs, 0)
if err != nil {
return fmt.Errorf("handling initial message failed: %w", err)
}
l.metrics.PublishDesiredRunners(desiredRunners)
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
msg, err := l.getMessage(ctx)
if err != nil {
return fmt.Errorf("failed to get message: %w", err)
}
if msg == nil {
continue
}
// New context is created to avoid cancelation during message handling.
if err := l.handleMessage(context.Background(), handler, msg); err != nil {
return fmt.Errorf("failed to handle message: %w", err)
}
}
}
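Putting it together, a sketch of how a caller might drive the listener (actionsClient, logger, handler, and ctx are assumed to come from the surrounding program, e.g. the config and app packages in this change):

l, err := New(Config{
	Client:     actionsClient, // e.g. an *actions.Client built by config.ActionsClient
	ScaleSetID: 1,
	MinRunners: 0,
	MaxRunners: 5,
	Logger:     logger,
	// Metrics omitted: New falls back to metrics.Discard.
})
if err != nil {
	return err
}
// Blocks until ctx is cancelled or an unrecoverable error occurs.
return l.Listen(ctx, handler)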
func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
parsedMsg, err := l.parseMessage(ctx, msg)
if err != nil {
return fmt.Errorf("failed to parse message: %w", err)
}
l.metrics.PublishStatistics(parsedMsg.statistics)
if len(parsedMsg.jobsAvailable) > 0 {
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
if err != nil {
return fmt.Errorf("failed to acquire jobs: %w", err)
}
l.logger.Info("Jobs are acquired", "count", len(acquiredJobIDs), "requestIds", fmt.Sprint(acquiredJobIDs))
}
for _, jobCompleted := range parsedMsg.jobsCompleted {
l.metrics.PublishJobCompleted(jobCompleted)
}
l.lastMessageID = msg.MessageId
if err := l.deleteLastMessage(ctx); err != nil {
return fmt.Errorf("failed to delete message: %w", err)
}
for _, jobStarted := range parsedMsg.jobsStarted {
if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
return fmt.Errorf("failed to handle job started: %w", err)
}
l.metrics.PublishJobStarted(jobStarted)
}
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs, len(parsedMsg.jobsCompleted))
if err != nil {
return fmt.Errorf("failed to handle desired runner count: %w", err)
}
l.metrics.PublishDesiredRunners(desiredRunners)
return nil
}
func (l *Listener) createSession(ctx context.Context) error {
var session *actions.RunnerScaleSetSession
var retries int
for {
var err error
session, err = l.client.CreateMessageSession(ctx, l.scaleSetID, l.hostname)
if err == nil {
break
}
clientErr := &actions.HttpClientSideError{}
if !errors.As(err, &clientErr) {
return fmt.Errorf("failed to create session: %w", err)
}
if clientErr.Code != http.StatusConflict {
return fmt.Errorf("failed to create session: %w", err)
}
retries++
if retries >= sessionCreationMaxRetries {
return fmt.Errorf("failed to create session after %d retries: %w", retries, err)
}
l.logger.Info("Unable to create message session. Will try again in 30 seconds", "error", err.Error())
select {
case <-ctx.Done():
return fmt.Errorf("context cancelled: %w", ctx.Err())
case <-time.After(30 * time.Second):
}
}
statistics, err := json.Marshal(session.Statistics)
if err != nil {
return fmt.Errorf("failed to marshal statistics: %w", err)
}
l.logger.Info("Current runner scale set statistics.", "statistics", string(statistics))
l.session = session
return nil
}
func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
if err == nil { // if NO error
return msg, nil
}
expiredError := &actions.MessageQueueTokenExpiredError{}
if !errors.As(err, &expiredError) {
return nil, fmt.Errorf("failed to get next message: %w", err)
}
if err := l.refreshSession(ctx); err != nil {
return nil, err
}
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
if err != nil { // still failing after the session refresh
return nil, fmt.Errorf("failed to get next message after message session refresh: %w", err)
}
return msg, nil
}
func (l *Listener) deleteLastMessage(ctx context.Context) error {
l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil {
return fmt.Errorf("failed to delete message: %w", err)
}
return nil
}
type parsedMessage struct {
statistics *actions.RunnerScaleSetStatistic
jobsStarted []*actions.JobStarted
jobsAvailable []*actions.JobAvailable
jobsCompleted []*actions.JobCompleted
}
func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
if msg.MessageType != "RunnerScaleSetJobMessages" {
l.logger.Info("Skipping message", "messageType", msg.MessageType)
return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
}
l.logger.Info("Processing message", "messageId", msg.MessageId, "messageType", msg.MessageType)
if msg.Statistics == nil {
return nil, fmt.Errorf("invalid message: statistics is nil")
}
l.logger.Info("New runner scale set statistics.", "statistics", msg.Statistics)
var batchedMessages []json.RawMessage
if len(msg.Body) > 0 {
if err := json.Unmarshal([]byte(msg.Body), &batchedMessages); err != nil {
return nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
}
}
parsedMsg := &parsedMessage{
statistics: msg.Statistics,
}
for _, msg := range batchedMessages {
var messageType actions.JobMessageType
if err := json.Unmarshal(msg, &messageType); err != nil {
return nil, fmt.Errorf("failed to decode job message type: %w", err)
}
switch messageType.MessageType {
case messageTypeJobAvailable:
var jobAvailable actions.JobAvailable
if err := json.Unmarshal(msg, &jobAvailable); err != nil {
return nil, fmt.Errorf("failed to decode job available: %w", err)
}
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
case messageTypeJobAssigned:
var jobAssigned actions.JobAssigned
if err := json.Unmarshal(msg, &jobAssigned); err != nil {
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
}
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
case messageTypeJobStarted:
var jobStarted actions.JobStarted
if err := json.Unmarshal(msg, &jobStarted); err != nil {
return nil, fmt.Errorf("could not decode job started message. %w", err)
}
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
case messageTypeJobCompleted:
var jobCompleted actions.JobCompleted
if err := json.Unmarshal(msg, &jobCompleted); err != nil {
return nil, fmt.Errorf("failed to decode job completed: %w", err)
}
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
default:
l.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
}
}
return parsedMsg, nil
}
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
ids := make([]int64, 0, len(jobsAvailable))
for _, job := range jobsAvailable {
ids = append(ids, job.RunnerRequestId)
}
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
idsAcquired, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
if err == nil { // if NO errors
return idsAcquired, nil
}
expiredError := &actions.MessageQueueTokenExpiredError{}
if !errors.As(err, &expiredError) {
return nil, fmt.Errorf("failed to acquire jobs: %w", err)
}
if err := l.refreshSession(ctx); err != nil {
return nil, err
}
idsAcquired, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
if err != nil {
return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
}
return idsAcquired, nil
}
func (l *Listener) refreshSession(ctx context.Context) error {
l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
if err != nil {
return fmt.Errorf("refresh message session failed. %w", err)
}
l.session = session
return nil
}
func (l *Listener) deleteMessageSession() error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
l.logger.Info("Deleting message session")
if err := l.client.DeleteMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId); err != nil {
return fmt.Errorf("failed to delete message session: %w", err)
}
return nil
}


@@ -1,878 +0,0 @@
package listener
import (
"context"
"encoding/json"
"errors"
"net/http"
"testing"
"time"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestNew(t *testing.T) {
t.Parallel()
t.Run("InvalidConfig", func(t *testing.T) {
t.Parallel()
var config Config
_, err := New(config)
assert.NotNil(t, err)
})
t.Run("ValidConfig", func(t *testing.T) {
t.Parallel()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics.Discard,
}
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
})
}
func TestListener_createSession(t *testing.T) {
t.Parallel()
t.Run("FailOnce", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.createSession(ctx)
assert.NotNil(t, err)
})
t.Run("FailContext", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil,
&actions.HttpClientSideError{Code: http.StatusConflict}).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.createSession(ctx)
assert.True(t, errors.Is(err, context.DeadlineExceeded))
})
t.Run("SetsSession", func(t *testing.T) {
t.Parallel()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.createSession(context.Background())
assert.Nil(t, err)
assert.Equal(t, session, l.session)
})
}
func TestListener_getMessage(t *testing.T) {
t.Parallel()
t.Run("ReceivesMessage", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
want := &actions.RunnerScaleSetMessage{
MessageId: 1,
}
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
got, err := l.getMessage(ctx)
assert.Nil(t, err)
assert.Equal(t, want, got)
})
t.Run("NotExpiredError", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
_, err = l.getMessage(ctx)
assert.NotNil(t, err)
})
t.Run("RefreshAndSucceeds", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
want := &actions.RunnerScaleSetMessage{
MessageId: 1,
}
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
got, err := l.getMessage(ctx)
assert.Nil(t, err)
assert.Equal(t, want, got)
})
t.Run("RefreshAndFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
got, err := l.getMessage(ctx)
assert.NotNil(t, err)
assert.Nil(t, got)
})
}
func TestListener_refreshSession(t *testing.T) {
t.Parallel()
t.Run("SuccessfullyRefreshes", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
newUUID := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &newUUID,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
err = l.refreshSession(ctx)
assert.Nil(t, err)
assert.Equal(t, session, l.session)
})
t.Run("FailsToRefresh", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, errors.New("error")).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
oldSession := &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
l.session = oldSession
err = l.refreshSession(ctx)
assert.NotNil(t, err)
assert.Equal(t, oldSession, l.session)
})
}
func TestListener_deleteLastMessage(t *testing.T) {
t.Parallel()
t.Run("SuccessfullyDeletes", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
return lastMessageID.(int64) == int64(5)
})).Return(nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
l.lastMessageID = 5
err = l.deleteLastMessage(ctx)
assert.Nil(t, err)
})
t.Run("FailsToDelete", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("error")).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
l.lastMessageID = 5
err = l.deleteLastMessage(ctx)
assert.NotNil(t, err)
})
}
func TestListener_Listen(t *testing.T) {
t.Parallel()
t.Run("CreateSessionFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.Listen(ctx, nil)
assert.NotNil(t, err)
})
t.Run("CallHandleRegardlessOfInitialMessage", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
var called bool
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
Return(0, nil).
Run(
func(mock.Arguments) {
called = true
cancel()
},
).
Once()
err = l.Listen(ctx, handler)
assert.True(t, errors.Is(err, context.Canceled))
assert.True(t, called)
})
t.Run("CancelContextAfterGetMessage", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).
Return(msg, nil).
Run(
func(mock.Arguments) {
cancel()
},
).
Once()
// Ensure delete message is called with background context
client.On("DeleteMessage", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
config.Client = client
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
Return(0, nil).
Once()
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
Return(0, nil).
Once()
l, err := New(config)
require.Nil(t, err)
err = l.Listen(ctx, handler)
assert.ErrorIs(t, err, context.Canceled)
})
}
func TestListener_acquireAvailableJobs(t *testing.T) {
t.Parallel()
t.Run("FailingToAcquireJobs", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
uuid := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
_, err = l.acquireAvailableJobs(ctx, availableJobs)
assert.Error(t, err)
})
t.Run("SuccessfullyAcquiresJobsOnFirstRun", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
jobIDs := []int64{1, 2, 3}
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(jobIDs, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
uuid := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, availableJobs)
assert.NoError(t, err)
assert.Equal(t, []int64{1, 2, 3}, acquiredJobIDs)
})
t.Run("RefreshAndSucceeds", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
// Second call to AcquireJobs will succeed
want := []int64{1, 2, 3}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
// First call to AcquireJobs will fail with a token expired error
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
ids := args.Get(3).([]int64)
assert.Equal(t, want, ids)
}).
Return(nil, &actions.MessageQueueTokenExpiredError{}).
Once()
// Second call should succeed
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
ids := args.Get(3).([]int64)
assert.Equal(t, want, ids)
}).
Return(want, nil).
Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
got, err := l.acquireAvailableJobs(ctx, availableJobs)
assert.Nil(t, err)
assert.Equal(t, want, got)
})
t.Run("RefreshAndFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
got, err := l.acquireAvailableJobs(ctx, availableJobs)
assert.NotNil(t, err)
assert.Nil(t, got)
})
}
func TestListener_parseMessage(t *testing.T) {
t.Run("FailOnEmptyStatistics", func(t *testing.T) {
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: nil,
}
l := &Listener{}
parsedMsg, err := l.parseMessage(context.Background(), msg)
assert.Error(t, err)
assert.Nil(t, parsedMsg)
})
t.Run("FailOnIncorrectMessageType", func(t *testing.T) {
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerMessages", // arbitrary message type
Statistics: &actions.RunnerScaleSetStatistic{},
}
l := &Listener{}
parsedMsg, err := l.parseMessage(context.Background(), msg)
assert.Error(t, err)
assert.Nil(t, parsedMsg)
})
t.Run("ParseAll", func(t *testing.T) {
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Body: "",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
},
}
var batchedMessages []any
jobsAvailable := []*actions.JobAvailable{
{
AcquireJobUrl: "https://github.com/example",
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable,
},
RunnerRequestId: 1,
},
},
{
AcquireJobUrl: "https://github.com/example",
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable,
},
RunnerRequestId: 2,
},
},
}
for _, msg := range jobsAvailable {
batchedMessages = append(batchedMessages, msg)
}
jobsAssigned := []*actions.JobAssigned{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned,
},
RunnerRequestId: 3,
},
},
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned,
},
RunnerRequestId: 4,
},
},
}
for _, msg := range jobsAssigned {
batchedMessages = append(batchedMessages, msg)
}
jobsStarted := []*actions.JobStarted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted,
},
RunnerRequestId: 5,
},
RunnerId: 2,
RunnerName: "runner2",
},
}
for _, msg := range jobsStarted {
batchedMessages = append(batchedMessages, msg)
}
jobsCompleted := []*actions.JobCompleted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 6,
},
Result: "success",
RunnerId: 1,
RunnerName: "runner1",
},
}
for _, msg := range jobsCompleted {
batchedMessages = append(batchedMessages, msg)
}
b, err := json.Marshal(batchedMessages)
require.NoError(t, err)
msg.Body = string(b)
l := &Listener{}
parsedMsg, err := l.parseMessage(context.Background(), msg)
require.NoError(t, err)
assert.Equal(t, msg.Statistics, parsedMsg.statistics)
assert.Equal(t, jobsAvailable, parsedMsg.jobsAvailable)
assert.Equal(t, jobsStarted, parsedMsg.jobsStarted)
assert.Equal(t, jobsCompleted, parsedMsg.jobsCompleted)
})
}


@@ -1,205 +0,0 @@
package listener
import (
"context"
"encoding/json"
"testing"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestInitialMetrics(t *testing.T) {
t.Parallel()
t.Run("SetStaticMetrics", func(t *testing.T) {
t.Parallel()
metrics := metricsmocks.NewPublisher(t)
minRunners := 5
maxRunners := 10
metrics.On("PublishStatic", minRunners, maxRunners).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
MinRunners: minRunners,
MaxRunners: maxRunners,
}
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
})
t.Run("InitialMessageStatistics", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
sessionStatistics := &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
}
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: sessionStatistics,
}
metrics := metricsmocks.NewPublisher(t)
metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
metrics.On("PublishStatistics", sessionStatistics).Once()
metrics.On("PublishDesiredRunners", sessionStatistics.TotalAssignedJobs).
Run(
func(mock.Arguments) {
cancel()
},
).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
config.Client = client
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs, 0).
Return(sessionStatistics.TotalAssignedJobs, nil).
Once()
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
assert.ErrorIs(t, l.Listen(ctx, handler), context.Canceled)
})
}
func TestHandleMessageMetrics(t *testing.T) {
t.Parallel()
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Body: "",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
},
}
var batchedMessages []any
jobsStarted := []*actions.JobStarted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted,
},
RunnerRequestId: 8,
},
RunnerId: 3,
RunnerName: "runner3",
},
}
for _, msg := range jobsStarted {
batchedMessages = append(batchedMessages, msg)
}
jobsCompleted := []*actions.JobCompleted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 6,
},
Result: "success",
RunnerId: 1,
RunnerName: "runner1",
},
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 7,
},
Result: "success",
RunnerId: 2,
RunnerName: "runner2",
},
}
for _, msg := range jobsCompleted {
batchedMessages = append(batchedMessages, msg)
}
b, err := json.Marshal(batchedMessages)
require.NoError(t, err)
msg.Body = string(b)
desiredResult := 4
metrics := metricsmocks.NewPublisher(t)
metrics.On("PublishStatic", 0, 0).Once()
metrics.On("PublishStatistics", msg.Statistics).Once()
metrics.On("PublishJobCompleted", jobsCompleted[0]).Once()
metrics.On("PublishJobCompleted", jobsCompleted[1]).Once()
metrics.On("PublishJobStarted", jobsStarted[0]).Once()
metrics.On("PublishDesiredRunners", desiredResult).Once()
handler := listenermocks.NewHandler(t)
handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 2).Return(desiredResult, nil).Once()
client := listenermocks.NewClient(t)
client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
}
l, err := New(config)
require.NoError(t, err)
l.client = client
l.session = &actions.RunnerScaleSetSession{
OwnerName: "",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "",
MessageQueueAccessToken: "",
Statistics: &actions.RunnerScaleSetStatistic{},
}
err = l.handleMessage(context.Background(), handler, msg)
require.NoError(t, err)
}


@@ -1,190 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
uuid "github.com/google/uuid"
)
// Client is an autogenerated mock type for the Client type
type Client struct {
mock.Mock
}
// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
var r0 []int64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) ([]int64, error)); ok {
return rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok {
r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok {
r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, owner)
var r0 *actions.RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string) (*actions.RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, owner)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string) *actions.RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, owner)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
r1 = rf(ctx, runnerScaleSetId, owner)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
ret := _m.Called(ctx, runnerScaleSetId)
var r0 *actions.AcquirableJobList
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int) (*actions.AcquirableJobList, error)); ok {
return rf(ctx, runnerScaleSetId)
}
if rf, ok := ret.Get(0).(func(context.Context, int) *actions.AcquirableJobList); ok {
r0 = rf(ctx, runnerScaleSetId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.AcquirableJobList)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
r1 = rf(ctx, runnerScaleSetId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId
func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
var r0 *actions.RunnerScaleSetMessage
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*actions.RunnerScaleSetMessage, error)); ok {
return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
}
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *actions.RunnerScaleSetMessage); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetMessage)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 *actions.RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) (*actions.RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, sessionId)
}
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *actions.RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok {
r1 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewClient(t interface {
mock.TestingT
Cleanup(func())
}) *Client {
mock := &Client{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
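The rf type assertions above also allow function-valued returns, so a stub can compute its result from the call's arguments; a small sketch (test context assumed):

client := NewClient(t)
client.On("GetMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
	Return(func(ctx context.Context, url, token string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) {
		// Echo back an incremented message ID so callers can observe progress.
		return &actions.RunnerScaleSetMessage{MessageId: lastMessageId + 1}, nil
	})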


@@ -1,68 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// Handler is an autogenerated mock type for the Handler type
type Handler struct {
mock.Mock
}
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
ret := _m.Called(ctx, count, jobsCompleted)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
return rf(ctx, count, jobsCompleted)
}
if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
r0 = rf(ctx, count, jobsCompleted)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
r1 = rf(ctx, count, jobsCompleted)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ret := _m.Called(ctx, jobInfo)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
r0 = rf(ctx, jobInfo)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewHandler(t interface {
mock.TestingT
Cleanup(func())
}) *Handler {
mock := &Handler{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -1,40 +0,0 @@
package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
)
func main() {
configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
if !ok {
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
os.Exit(1)
}
config, err := config.Read(configPath)
if err != nil {
log.Printf("Failed to read config: %v", err)
os.Exit(1)
}
app, err := app.New(config)
if err != nil {
log.Printf("Failed to initialize app: %v", err)
os.Exit(1)
}
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
if err := app.Run(ctx); err != nil {
log.Printf("Application returned an error: %v", err)
os.Exit(1)
}
}


@@ -1,389 +0,0 @@
package metrics
import (
"context"
"net/http"
"strconv"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
labelKeyRunnerScaleSetName = "name"
labelKeyRunnerScaleSetNamespace = "namespace"
labelKeyEnterprise = "enterprise"
labelKeyOrganization = "organization"
labelKeyRepository = "repository"
labelKeyJobName = "job_name"
labelKeyJobWorkflowRef = "job_workflow_ref"
labelKeyEventName = "event_name"
labelKeyJobResult = "job_result"
labelKeyRunnerID = "runner_id"
labelKeyRunnerName = "runner_name"
)
const githubScaleSetSubsystem = "gha"
// labels
var (
scaleSetLabels = []string{
labelKeyRunnerScaleSetName,
labelKeyRepository,
labelKeyOrganization,
labelKeyEnterprise,
labelKeyRunnerScaleSetNamespace,
}
jobLabels = []string{
labelKeyRepository,
labelKeyOrganization,
labelKeyEnterprise,
labelKeyJobName,
labelKeyJobWorkflowRef,
labelKeyEventName,
}
completedJobsTotalLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
startedJobsTotalLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
jobStartupDurationLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
)
var (
assignedJobs = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "assigned_jobs",
Help: "Number of jobs assigned to this scale set.",
},
scaleSetLabels,
)
runningJobs = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "running_jobs",
Help: "Number of jobs running (or about to be run).",
},
scaleSetLabels,
)
registeredRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "registered_runners",
Help: "Number of runners registered by the scale set.",
},
scaleSetLabels,
)
busyRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "busy_runners",
Help: "Number of registered runners running a job.",
},
scaleSetLabels,
)
minRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "min_runners",
Help: "Minimum number of runners.",
},
scaleSetLabels,
)
maxRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "max_runners",
Help: "Maximum number of runners.",
},
scaleSetLabels,
)
desiredRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "desired_runners",
Help: "Number of runners desired by the scale set.",
},
scaleSetLabels,
)
idleRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "idle_runners",
Help: "Number of registered runners not running a job.",
},
scaleSetLabels,
)
startedJobsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: githubScaleSetSubsystem,
Name: "started_jobs_total",
Help: "Total number of jobs started.",
},
startedJobsTotalLabels,
)
completedJobsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "completed_jobs_total",
Help: "Total number of jobs completed.",
Subsystem: githubScaleSetSubsystem,
},
completedJobsTotalLabels,
)
jobStartupDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: githubScaleSetSubsystem,
Name: "job_startup_duration_seconds",
Help: "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobStartupDurationLabels,
)
jobExecutionDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: githubScaleSetSubsystem,
Name: "job_execution_duration_seconds",
Help: "Time spent executing workflow jobs by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobExecutionDurationLabels,
)
)
var runtimeBuckets = []float64{
	0.01, 0.05, 0.1, 0.5,
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	12, 15, 18, 20, 25, 30,
	40, 50, 60, 70, 80, 90, 100, 110, 120,
	150, 180, 210, 240,
	300, 360, 420, 480, 540, 600,
	900, 1200, 1800, 2400, 3000, 3600,
}
type baseLabels struct {
scaleSetName string
scaleSetNamespace string
enterprise string
organization string
repository string
}
func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
return prometheus.Labels{
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName,
labelKeyJobName: jobBase.JobDisplayName,
labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
labelKeyEventName: jobBase.EventName,
}
}
func (b *baseLabels) scaleSetLabels() prometheus.Labels {
return prometheus.Labels{
labelKeyRunnerScaleSetName: b.scaleSetName,
labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: b.organization,
labelKeyRepository: b.repository,
}
}
func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyJobResult] = msg.Result
l[labelKeyRunnerName] = msg.RunnerName
return l
}
func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyRunnerName] = msg.RunnerName
return l
}
//go:generate mockery --name Publisher --output ./mocks --outpkg mocks --case underscore
type Publisher interface {
PublishStatic(min, max int)
PublishStatistics(stats *actions.RunnerScaleSetStatistic)
PublishJobStarted(msg *actions.JobStarted)
PublishJobCompleted(msg *actions.JobCompleted)
PublishDesiredRunners(count int)
}
//go:generate mockery --name ServerPublisher --output ./mocks --outpkg mocks --case underscore
type ServerPublisher interface {
Publisher
ListenAndServe(ctx context.Context) error
}
var (
_ Publisher = &discard{}
_ ServerPublisher = &exporter{}
)
var Discard Publisher = &discard{}
type exporter struct {
logger logr.Logger
baseLabels
srv *http.Server
}
type ExporterConfig struct {
ScaleSetName string
ScaleSetNamespace string
Enterprise string
Organization string
Repository string
ServerAddr string
ServerEndpoint string
Logger logr.Logger
}
func NewExporter(config ExporterConfig) ServerPublisher {
reg := prometheus.NewRegistry()
reg.MustRegister(
assignedJobs,
runningJobs,
registeredRunners,
busyRunners,
minRunners,
maxRunners,
desiredRunners,
idleRunners,
startedJobsTotal,
completedJobsTotal,
jobStartupDurationSeconds,
jobExecutionDurationSeconds,
)
mux := http.NewServeMux()
mux.Handle(
config.ServerEndpoint,
promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
)
return &exporter{
logger: config.Logger.WithName("metrics"),
baseLabels: baseLabels{
scaleSetName: config.ScaleSetName,
scaleSetNamespace: config.ScaleSetNamespace,
enterprise: config.Enterprise,
organization: config.Organization,
repository: config.Repository,
},
srv: &http.Server{
Addr: config.ServerAddr,
Handler: mux,
},
}
}
func (e *exporter) ListenAndServe(ctx context.Context) error {
e.logger.Info("starting metrics server", "addr", e.srv.Addr)
go func() {
<-ctx.Done()
e.logger.Info("stopping metrics server")
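// Note: ctx is already canceled at this point, so Shutdown closes the
// listeners immediately instead of draining in-flight scrapes; its error
// is discarded.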
e.srv.Shutdown(ctx)
}()
return e.srv.ListenAndServe()
}
func (e *exporter) PublishStatic(min, max int) {
l := e.scaleSetLabels()
maxRunners.With(l).Set(float64(max))
minRunners.With(l).Set(float64(min))
}
func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
l := e.scaleSetLabels()
assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
}
func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
l := e.startedJobLabels(msg)
startedJobsTotal.With(l).Inc()
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}
func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
l := e.completedJobLabels(msg)
completedJobsTotal.With(l).Inc()
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
}
func (e *exporter) PublishDesiredRunners(count int) {
desiredRunners.With(e.scaleSetLabels()).Set(float64(count))
}
type discard struct{}
func (*discard) PublishStatic(int, int) {}
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic) {}
func (*discard) PublishJobStarted(*actions.JobStarted) {}
func (*discard) PublishJobCompleted(*actions.JobCompleted) {}
func (*discard) PublishDesiredRunners(int) {}
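
Taken together, a minimal sketch of wiring this exporter into a process; the addresses and label values are placeholders, and the import path assumes the package layout shown in this diff:

package main

import (
	"context"
	"time"

	"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics" // assumed path
	"github.com/go-logr/logr"
)

func main() {
	exporter := metrics.NewExporter(metrics.ExporterConfig{
		ScaleSetName:      "arc-runner-set", // placeholder label values
		ScaleSetNamespace: "arc-runners",
		Organization:      "my-org",
		ServerAddr:        ":8080",
		ServerEndpoint:    "/metrics",
		Logger:            logr.Discard(),
	})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Serve /metrics in the background until the context is canceled.
	go func() { _ = exporter.ListenAndServe(ctx) }()

	exporter.PublishStatic(0, 10)     // min=0, max=10 runner gauges
	exporter.PublishDesiredRunners(3) // gha_desired_runners{...} = 3

	time.Sleep(time.Minute) // keep serving briefly for this sketch
}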

View File

@@ -1,53 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// Publisher is an autogenerated mock type for the Publisher type
type Publisher struct {
mock.Mock
}
// PublishDesiredRunners provides a mock function with given fields: count
func (_m *Publisher) PublishDesiredRunners(count int) {
_m.Called(count)
}
// PublishJobCompleted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobCompleted(msg *actions.JobCompleted) {
_m.Called(msg)
}
// PublishJobStarted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobStarted(msg *actions.JobStarted) {
_m.Called(msg)
}
// PublishStatic provides a mock function with given fields: min, max
func (_m *Publisher) PublishStatic(min int, max int) {
_m.Called(min, max)
}
// PublishStatistics provides a mock function with given fields: stats
func (_m *Publisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
_m.Called(stats)
}
// NewPublisher creates a new instance of Publisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewPublisher(t interface {
mock.TestingT
Cleanup(func())
}) *Publisher {
mock := &Publisher{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,69 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// ServerPublisher is an autogenerated mock type for the ServerPublisher type
type ServerPublisher struct {
mock.Mock
}
// ListenAndServe provides a mock function with given fields: ctx
func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// PublishDesiredRunners provides a mock function with given fields: count
func (_m *ServerPublisher) PublishDesiredRunners(count int) {
_m.Called(count)
}
// PublishJobCompleted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobCompleted(msg *actions.JobCompleted) {
_m.Called(msg)
}
// PublishJobStarted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobStarted(msg *actions.JobStarted) {
_m.Called(msg)
}
// PublishStatic provides a mock function with given fields: min, max
func (_m *ServerPublisher) PublishStatic(min int, max int) {
_m.Called(min, max)
}
// PublishStatistics provides a mock function with given fields: stats
func (_m *ServerPublisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
_m.Called(stats)
}
// NewServerPublisher creates a new instance of ServerPublisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewServerPublisher(t interface {
mock.TestingT
Cleanup(func())
}) *ServerPublisher {
mock := &ServerPublisher{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,242 +0,0 @@
package worker
import (
"context"
"encoding/json"
"fmt"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const workerName = "kubernetesworker"
type Option func(*Worker)
func WithLogger(logger logr.Logger) Option {
return func(w *Worker) {
logger = logger.WithName(workerName)
w.logger = &logger
}
}
type Config struct {
EphemeralRunnerSetNamespace string
EphemeralRunnerSetName string
MaxRunners int
MinRunners int
}
// The Worker's role is to process the messages it receives from the listener.
// It then initiates Kubernetes API requests to carry out the necessary actions.
type Worker struct {
clientset *kubernetes.Clientset
config Config
lastPatch int
lastPatchID int
logger *logr.Logger
}
var _ listener.Handler = (*Worker)(nil)
func New(config Config, options ...Option) (*Worker, error) {
w := &Worker{
config: config,
lastPatch: -1,
lastPatchID: -1,
}
conf, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(conf)
if err != nil {
return nil, err
}
w.clientset = clientset
for _, option := range options {
option(w)
}
if err := w.applyDefaults(); err != nil {
return nil, err
}
return w, nil
}
func (w *Worker) applyDefaults() error {
if w.logger == nil {
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatJSON)
if err != nil {
return fmt.Errorf("NewLogger failed: %w", err)
}
logger = logger.WithName(workerName)
w.logger = &logger
}
return nil
}
// HandleJobStarted updates the job information for the ephemeral runner when a job is started.
// It takes a context and a jobInfo parameter which contains the details of the started job.
// This update marks the ephemeral runner so that the controller has enough context
// to know which ephemeral runners should not be deleted when scaling down.
// It returns an error if there is any issue with updating the job information.
func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
w.logger.Info("Updating job info for the runner",
"runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName,
"repoName", jobInfo.RepositoryName,
"workflowRef", jobInfo.JobWorkflowRef,
"workflowRunId", jobInfo.WorkflowRunId,
"jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestId)
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
if err != nil {
return fmt.Errorf("failed to marshal empty ephemeral runner: %w", err)
}
patch, err := json.Marshal(
&v1alpha1.EphemeralRunner{
Status: v1alpha1.EphemeralRunnerStatus{
JobRequestId: jobInfo.RunnerRequestId,
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
WorkflowRunId: jobInfo.WorkflowRunId,
JobWorkflowRef: jobInfo.JobWorkflowRef,
JobDisplayName: jobInfo.JobDisplayName,
},
},
)
if err != nil {
return fmt.Errorf("failed to marshal ephemeral runner patch: %w", err)
}
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
if err != nil {
return fmt.Errorf("failed to create merge patch json for ephemeral runner: %w", err)
}
w.logger.Info("Updating ephemeral runner with merge patch", "json", string(mergePatch))
patchedStatus := &v1alpha1.EphemeralRunner{}
err = w.clientset.RESTClient().
Patch(types.MergePatchType).
Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
Namespace(w.config.EphemeralRunnerSetNamespace).
Resource("ephemeralrunners"). // resource names in API paths are lowercase plural
Name(jobInfo.RunnerName).
SubResource("status").
Body(mergePatch).
Do(ctx).
Into(patchedStatus)
if err != nil {
if kerrors.IsNotFound(err) {
w.logger.Info("Ephemeral runner not found, skipping patching of ephemeral runner status", "runnerName", jobInfo.RunnerName)
return nil
}
return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err)
}
w.logger.Info("Ephemeral runner status updated with the merge patch successfully.")
return nil
}
// HandleDesiredRunnerCount handles the desired runner count by scaling the ephemeral runner set.
// The function calculates the target runner count based on the minimum and maximum runner count configuration.
// If the target runner count is the same as the last patched count, it skips patching and returns nil.
// Otherwise, it creates a merge patch JSON for updating the ephemeral runner set with the desired count.
// The function then scales the ephemeral runner set by applying the merge patch.
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
logValues := []any{
"assigned job", count,
"decision", targetRunnerCount,
"min", w.config.MinRunners,
"max", w.config.MaxRunners,
"currentRunnerCount", w.lastPatch,
"jobsCompleted", jobsCompleted,
}
if w.lastPatch == targetRunnerCount && jobsCompleted == 0 {
w.logger.Info("Skipping patch", logValues...)
return targetRunnerCount, nil
}
w.lastPatchID++
w.lastPatch = targetRunnerCount
original, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: -1,
PatchID: -1,
},
},
)
if err != nil {
return 0, fmt.Errorf("failed to marshal empty ephemeral runner set: %w", err)
}
patch, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: targetRunnerCount,
PatchID: w.lastPatchID,
},
},
)
if err != nil {
w.logger.Error(err, "could not marshal patch ephemeral runner set")
return 0, err
}
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
if err != nil {
return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
}
w.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))
w.logger.Info("Scaling ephemeral runner set", logValues...)
patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
err = w.clientset.RESTClient().
Patch(types.MergePatchType).
Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
Namespace(w.config.EphemeralRunnerSetNamespace).
Resource("ephemeralrunnersets").
Name(w.config.EphemeralRunnerSetName).
Body([]byte(mergePatch)).
Do(ctx).
Into(patchedEphemeralRunnerSet)
if err != nil {
return 0, fmt.Errorf("could not patch ephemeral runner set, patch JSON: %s, error: %w", string(mergePatch), err)
}
w.logger.Info("Ephemeral runner set scaled.",
"namespace", w.config.EphemeralRunnerSetNamespace,
"name", w.config.EphemeralRunnerSetName,
"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
)
return targetRunnerCount, nil
}
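
Both handlers above build their request body the same way: marshal a zero-ish value and the desired value, then let jsonpatch derive the minimal merge patch between them. A self-contained sketch of that step, with types simplified from the ones used here:

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

type spec struct {
	Replicas int `json:"replicas"`
	PatchID  int `json:"patchID"`
}

type runnerSet struct {
	Spec spec `json:"spec"`
}

func main() {
	// The -1 sentinels guarantee both fields differ from the desired value,
	// so they always survive into the merge patch.
	original, _ := json.Marshal(&runnerSet{Spec: spec{Replicas: -1, PatchID: -1}})
	desired, _ := json.Marshal(&runnerSet{Spec: spec{Replicas: 5, PatchID: 2}})

	patch, err := jsonpatch.CreateMergePatch(original, desired)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"spec":{"patchID":2,"replicas":5}}
}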

View File

@@ -5,9 +5,9 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"strings"
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
)
@@ -30,7 +30,7 @@ type Service struct {
errs []error
}
func WithPrometheusMetrics(conf config.Config) func(*Service) {
func WithPrometheusMetrics(conf RunnerScaleSetListenerConfig) func(*Service) {
return func(svc *Service) {
parsedURL, err := actions.ParseGitHubConfigFromURL(conf.ConfigureUrl)
if err != nil {
@@ -81,7 +81,6 @@ func NewService(
}
func (s *Service) Start() error {
s.metricsExporter.publishStatic(s.settings.MaxRunners, s.settings.MinRunners)
for {
s.logger.Info("waiting for message...")
select {
@@ -205,9 +204,7 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
}
func (s *Service) scaleForAssignedJobCount(count int) error {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(s.settings.MinRunners+count, s.settings.MaxRunners)
targetRunnerCount := int(math.Max(math.Min(float64(s.settings.MaxRunners), float64(count)), float64(s.settings.MinRunners)))
s.metricsExporter.publishDesiredRunners(targetRunnerCount)
if targetRunnerCount != s.currentRunnerCount {
s.logger.Info("try scale runner request up/down based on assigned job count",
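
The two formulas in this hunk are not equivalent. A small worked comparison with illustrative values (Go 1.21 builtin min/max, as already used above):

package main

import "fmt"

func main() {
	minRunners, maxRunners, assigned := 2, 5, 4

	// min(MinRunners+count, MaxRunners): keeps MinRunners of idle headroom
	// on top of the assigned jobs, capped at MaxRunners.
	withHeadroom := min(minRunners+assigned, maxRunners) // min(6, 5) = 5

	// max(min(MaxRunners, count), MinRunners): merely clamps the assigned
	// count into the [MinRunners, MaxRunners] range.
	clamped := max(min(maxRunners, assigned), minRunners) // max(min(5, 4), 2) = 4

	fmt.Println(withHeadroom, clamped) // 5 4
}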

View File

@@ -397,7 +397,7 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {
require.NoError(t, err)
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
@@ -523,9 +523,9 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
require.NoError(t, err)
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 4).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
err = service.scaleForAssignedJobCount(0)
@@ -569,7 +569,7 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
)
require.NoError(t, err)
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(fmt.Errorf("error"))
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error"))
err = service.scaleForAssignedJobCount(2)
@@ -605,23 +605,8 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {
service.currentRunnerCount = 1
mockKubeManager.On(
"UpdateEphemeralRunnerWithJobInfo",
ctx,
service.settings.Namespace,
"runner1",
"owner1",
"repo1",
".github/workflows/ci.yaml",
"job1",
int64(100),
int64(3),
).Run(
func(_ mock.Arguments) { cancel() },
).Return(nil).Once()
mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil)
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,

View File

@@ -1,76 +0,0 @@
package config
import (
"encoding/json"
"fmt"
"os"
)
type Config struct {
ConfigureUrl string `json:"configureUrl"`
AppID int64 `json:"appID"`
AppInstallationID int64 `json:"appInstallationID"`
AppPrivateKey string `json:"appPrivateKey"`
Token string `json:"token"`
EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName"`
MaxRunners int `json:"maxRunners"`
MinRunners int `json:"minRunners"`
RunnerScaleSetId int `json:"runnerScaleSetId"`
RunnerScaleSetName string `json:"runnerScaleSetName"`
ServerRootCA string `json:"serverRootCA"`
LogLevel string `json:"logLevel"`
LogFormat string `json:"logFormat"`
MetricsAddr string `json:"metricsAddr"`
MetricsEndpoint string `json:"metricsEndpoint"`
}
func Read(path string) (Config, error) {
f, err := os.Open(path)
if err != nil {
return Config{}, err
}
defer f.Close()
var config Config
if err := json.NewDecoder(f).Decode(&config); err != nil {
return Config{}, fmt.Errorf("failed to decode config: %w", err)
}
if err := config.validate(); err != nil {
return Config{}, fmt.Errorf("failed to validate config: %w", err)
}
return config, nil
}
func (c *Config) validate() error {
if len(c.ConfigureUrl) == 0 {
return fmt.Errorf("GitHubConfigUrl is not provided")
}
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
}
if c.RunnerScaleSetId == 0 {
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
}
if c.MaxRunners < c.MinRunners {
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
}
hasToken := len(c.Token) > 0
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""
if !hasToken && !hasPrivateKeyConfig {
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
if hasToken && hasPrivateKeyConfig {
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
return nil
}
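
A minimal sketch of a config file that passes validate, exercised through Read; every value is a placeholder:

package main

import (
	"fmt"
	"os"

	"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
)

func main() {
	raw := `{
		"configureUrl": "https://github.com/my-org",
		"ephemeralRunnerSetNamespace": "arc-runners",
		"ephemeralRunnerSetName": "my-runner-set",
		"runnerScaleSetId": 1,
		"minRunners": 0,
		"maxRunners": 5,
		"token": "ghp_placeholder"
	}`

	f, err := os.CreateTemp("", "listener-config-*.json")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString(raw); err != nil {
		panic(err)
	}
	f.Close()

	cfg, err := config.Read(f.Name())
	if err != nil {
		panic(err) // fails here if validation rejects the file
	}
	fmt.Printf("%+v\n", cfg)
}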

View File

@@ -1,92 +0,0 @@
package config
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
Token: "token",
}
err := config.validate()
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2'", "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
Token: "asdf",
}
err := config.validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}

View File

@@ -28,26 +28,39 @@ import (
"time"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/go-logr/logr"
"github.com/kelseyhightower/envconfig"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/net/http/httpproxy"
"golang.org/x/sync/errgroup"
)
func main() {
configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
if !ok {
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
os.Exit(1)
}
type RunnerScaleSetListenerConfig struct {
ConfigureUrl string `split_words:"true"`
AppID int64 `split_words:"true"`
AppInstallationID int64 `split_words:"true"`
AppPrivateKey string `split_words:"true"`
Token string `split_words:"true"`
EphemeralRunnerSetNamespace string `split_words:"true"`
EphemeralRunnerSetName string `split_words:"true"`
MaxRunners int `split_words:"true"`
MinRunners int `split_words:"true"`
RunnerScaleSetId int `split_words:"true"`
RunnerScaleSetName string `split_words:"true"`
ServerRootCA string `split_words:"true"`
LogLevel string `split_words:"true"`
LogFormat string `split_words:"true"`
MetricsAddr string `split_words:"true"`
MetricsEndpoint string `split_words:"true"`
}
rc, err := config.Read(configPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: reading config from path(%q): %v\n", configPath, err)
func main() {
var rc RunnerScaleSetListenerConfig
if err := envconfig.Process("github", &rc); err != nil {
fmt.Fprintf(os.Stderr, "Error: processing environment variables for RunnerScaleSetListenerConfig: %v\n", err)
os.Exit(1)
}
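
For reference, envconfig derives each variable name from the prefix plus the split_words tag, so ConfigureUrl is read from GITHUB_CONFIGURE_URL, MaxRunners from GITHUB_MAX_RUNNERS, and so on. A minimal sketch of that mapping:

package main

import (
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

type listenerConfig struct {
	ConfigureUrl string `split_words:"true"` // GITHUB_CONFIGURE_URL
	MaxRunners   int    `split_words:"true"` // GITHUB_MAX_RUNNERS
}

func main() {
	os.Setenv("GITHUB_CONFIGURE_URL", "https://github.com/my-org")
	os.Setenv("GITHUB_MAX_RUNNERS", "5")

	var c listenerConfig
	if err := envconfig.Process("github", &c); err != nil {
		panic(err)
	}
	fmt.Println(c.ConfigureUrl, c.MaxRunners) // https://github.com/my-org 5
}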
@@ -67,6 +80,12 @@ func main() {
os.Exit(1)
}
// Validate all inputs
if err := validateConfig(&rc); err != nil {
logger.Error(err, "Inputs validation failed")
os.Exit(1)
}
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
@@ -104,7 +123,7 @@ func main() {
}
type metricsServer struct {
rc config.Config
rc RunnerScaleSetListenerConfig
logger logr.Logger
srv *http.Server
}
@@ -154,7 +173,7 @@ type runOptions struct {
serviceOptions []func(*Service)
}
func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
func run(ctx context.Context, rc RunnerScaleSetListenerConfig, logger logr.Logger, opts runOptions) error {
// Create root context and hook with sigint and sigterm
creds := &actions.ActionsAuth{}
if rc.Token != "" {
@@ -176,8 +195,6 @@ func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOpti
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: rc.RunnerScaleSetId,
HasProxy: hasProxy(),
Subsystem: "githubrunnerscalesetlistener",
})
if err != nil {
return fmt.Errorf("failed to create an Actions Service client: %w", err)
@@ -215,7 +232,38 @@ func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOpti
return nil
}
func newActionsClientFromConfig(config config.Config, creds *actions.ActionsAuth, options ...actions.ClientOption) (*actions.Client, error) {
func validateConfig(config *RunnerScaleSetListenerConfig) error {
if len(config.ConfigureUrl) == 0 {
return fmt.Errorf("GitHubConfigUrl is not provided")
}
if len(config.EphemeralRunnerSetNamespace) == 0 || len(config.EphemeralRunnerSetName) == 0 {
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", config.EphemeralRunnerSetNamespace, config.EphemeralRunnerSetName)
}
if config.RunnerScaleSetId == 0 {
return fmt.Errorf("RunnerScaleSetId '%d' is missing", config.RunnerScaleSetId)
}
if config.MaxRunners < config.MinRunners {
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", config.MinRunners, config.MaxRunners)
}
hasToken := len(config.Token) > 0
hasPrivateKeyConfig := config.AppID > 0 && config.AppPrivateKey != ""
if !hasToken && !hasPrivateKeyConfig {
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
}
if hasToken && hasPrivateKeyConfig {
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
}
return nil
}
func newActionsClientFromConfig(config RunnerScaleSetListenerConfig, creds *actions.ActionsAuth, options ...actions.ClientOption) (*actions.Client, error) {
if config.ServerRootCA != "" {
systemPool, err := x509.SystemCertPool()
if err != nil {
@@ -237,8 +285,3 @@ func newActionsClientFromConfig(config config.Config, creds *actions.ActionsAuth
return actions.NewClient(config.ConfigureUrl, creds, options...)
}
func hasProxy() bool {
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
return proxyFunc != nil
}

View File

@@ -3,6 +3,7 @@ package main
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"net/http/httptest"
"os"
@@ -12,11 +13,94 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/testserver"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &RunnerScaleSetListenerConfig{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
Token: "token",
}
err := validateConfig(config)
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2'", "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &RunnerScaleSetListenerConfig{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := validateConfig(config)
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
config := &RunnerScaleSetListenerConfig{
AppID: 1,
AppInstallationID: 10,
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := validateConfig(config)
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &RunnerScaleSetListenerConfig{
AppID: 1,
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := validateConfig(config)
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &RunnerScaleSetListenerConfig{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
Token: "asdf",
}
err := validateConfig(config)
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &RunnerScaleSetListenerConfig{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := validateConfig(config)
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}
func TestCustomerServerRootCA(t *testing.T) {
ctx := context.Background()
certsFolder := filepath.Join(
@@ -50,7 +134,7 @@ func TestCustomerServerRootCA(t *testing.T) {
require.NoError(t, err)
certsString = certsString + string(intermediate)
config := config.Config{
config := RunnerScaleSetListenerConfig{
ConfigureUrl: server.ConfigURLForOrg("myorg"),
ServerRootCA: certsString,
}
@@ -80,7 +164,7 @@ func TestProxySettings(t *testing.T) {
os.Setenv("http_proxy", proxy.URL)
defer os.Setenv("http_proxy", prevProxy)
config := config.Config{
config := RunnerScaleSetListenerConfig{
ConfigureUrl: "https://github.com/org/repo",
}
creds := &actions.ActionsAuth{
@@ -112,7 +196,7 @@ func TestProxySettings(t *testing.T) {
os.Setenv("https_proxy", proxy.URL)
defer os.Setenv("https_proxy", prevProxy)
config := config.Config{
config := RunnerScaleSetListenerConfig{
ConfigureUrl: "https://github.com/org/repo",
}
creds := &actions.ActionsAuth{
@@ -149,7 +233,7 @@ func TestProxySettings(t *testing.T) {
os.Setenv("no_proxy", "example.com")
defer os.Setenv("no_proxy", prevNoProxy)
config := config.Config{
config := RunnerScaleSetListenerConfig{
ConfigureUrl: "https://github.com/org/repo",
}
creds := &actions.ActionsAuth{

View File

@@ -1,6 +1,8 @@
package main
import (
"strconv"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/prometheus/client_golang/prometheus"
)
@@ -16,6 +18,8 @@ const (
labelKeyJobWorkflowRef = "job_workflow_ref"
labelKeyEventName = "event_name"
labelKeyJobResult = "job_result"
labelKeyRunnerID = "runner_id"
labelKeyRunnerName = "runner_name"
)
const githubScaleSetSubsystem = "gha"
@@ -39,15 +43,10 @@ var (
labelKeyEventName,
}
completedJobsTotalLabels = append(jobLabels, labelKeyJobResult)
jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult)
startedJobsTotalLabels = jobLabels
jobStartupDurationLabels = []string{
labelKeyRepository,
labelKeyOrganization,
labelKeyEnterprise,
labelKeyEventName,
}
completedJobsTotalLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
startedJobsTotalLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
jobStartupDurationLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
)
// metrics
@@ -275,34 +274,23 @@ func (b *baseLabels) scaleSetLabels() prometheus.Labels {
func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyJobResult] = msg.Result
l[labelKeyRunnerName] = msg.RunnerName
return l
}
func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyRunnerName] = msg.RunnerName
return l
}
func (b *baseLabels) jobStartupDurationLabels(msg *actions.JobStarted) prometheus.Labels {
return prometheus.Labels{
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: b.organization,
labelKeyRepository: b.repository,
labelKeyEventName: msg.EventName,
}
}
func (m *metricsExporter) withBaseLabels(base baseLabels) {
m.baseLabels = base
}
func (m *metricsExporter) publishStatic(max, min int) {
l := m.scaleSetLabels()
maxRunners.With(l).Set(float64(max))
minRunners.With(l).Set(float64(min))
}
func (m *metricsExporter) publishStatistics(stats *actions.RunnerScaleSetStatistic) {
l := m.scaleSetLabels()
@@ -319,7 +307,6 @@ func (m *metricsExporter) publishJobStarted(msg *actions.JobStarted) {
l := m.startedJobLabels(msg)
startedJobsTotal.With(l).Inc()
l = m.jobStartupDurationLabels(msg)
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
// Code generated by mockery v2.33.2. DO NOT EDIT.
package main

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
// Code generated by mockery v2.33.2. DO NOT EDIT.
package main

View File

@@ -39,9 +39,6 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
// +kubebuilder:scaffold:imports
)
@@ -152,19 +149,11 @@ func main() {
syncPeriod := 10 * time.Minute
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Cache: cache.Options{
SyncPeriod: &syncPeriod,
DefaultNamespaces: map[string]cache.Config{
watchNamespace: {},
},
},
Metrics: metricsserver.Options{
BindAddress: metricsAddr,
},
WebhookServer: webhook.NewServer(webhook.Options{
Port: 9443,
}),
Scheme: scheme,
SyncPeriod: &syncPeriod,
Namespace: watchNamespace,
MetricsBindAddress: metricsAddr,
Port: 9443,
})
if err != nil {
logger.Error(err, "unable to start manager")

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,9 +1,9 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -35,19 +35,10 @@ spec:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -56,9 +47,7 @@ spec:
properties:
capacityReservations:
items:
description: |-
CapacityReservation specifies the number of replicas temporarily added
to the scale target until ExpirationTime.
description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
@@ -90,46 +79,30 @@ spec:
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
@@ -137,9 +110,7 @@ spec:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
ScaleDownDelaySecondsAfterScaleOut is the approximate delay for a scale down followed by a scale up.
Used to prevent flapping (down->up->down->... loop)
description: ScaleDownDelaySecondsAfterScaleOut is the approximate delay for a scale down followed by a scale up. Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
@@ -155,18 +126,7 @@ spec:
type: string
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook request received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook request received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
items:
properties:
amount:
@@ -179,18 +139,12 @@ spec:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of the patterns in the list can trigger autoscaling.
Note that the check_run name seems to equal the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of the patterns in the list can trigger autoscaling. Note that the check_run name seems to equal the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
@@ -215,9 +169,7 @@ spec:
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
@@ -226,33 +178,23 @@ spec:
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
@@ -260,9 +202,7 @@ spec:
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever.
format: date-time
type: string
type: object
@@ -291,24 +231,18 @@ spec:
type: object
type: array
desiredReplicas:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability.
type: string
type: object
type: object
@@ -317,3 +251,9 @@ spec:
subresources:
status: {}
preserveUnknownFields: false
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
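
To ground these schema descriptions, a minimal HorizontalRunnerAutoscaler sketch using the fields above; all names are placeholders, and maxReplicas and startTime are assumed from the wider schema rather than shown in this hunk:

apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-autoscaler
spec:
  scaleTargetRef:
    name: example-runnerdeployment
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: "0.75"
    scaleDownThreshold: "0.25"
    scaleUpAdjustment: 2
    scaleDownAdjustment: 1
  scheduledOverrides:
  - startTime: "2024-01-06T00:00:00Z" # assumed field
    endTime: "2024-01-08T00:00:00Z"
    minReplicas: 0
    recurrenceRule:
      frequency: Weekly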

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,7 +1,9 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:

View File

@@ -1,7 +1,9 @@
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
@@ -83,10 +85,12 @@ webhooks:
resources:
- pods
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:

View File

@@ -30,6 +30,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
@@ -41,7 +42,7 @@ import (
)
const (
autoscalingListenerContainerName = "listener"
autoscalingListenerContainerName = "autoscaler"
autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
)
@@ -286,21 +287,6 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}
logger.Info("Listener pod is deleted")
var secret corev1.Secret
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret)
switch {
case err == nil:
if secret.ObjectMeta.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &secret); err != nil {
return false, fmt.Errorf("failed to delete listener config secret: %v", err)
}
}
return false, nil
case err != nil && !kerrors.IsNotFound(err):
return false, fmt.Errorf("failed to get listener config secret: %v", err)
}
if autoscalingListener.Spec.Proxy != nil {
logger.Info("Cleaning up the listener proxy secret")
proxySecret := new(corev1.Secret)
@@ -320,38 +306,6 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
logger.Info("Listener proxy secret is deleted")
}
listenerRoleBinding := new(rbacv1.RoleBinding)
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding)
switch {
case err == nil:
if listenerRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role binding")
if err := r.Delete(ctx, listenerRoleBinding); err != nil {
return false, fmt.Errorf("failed to delete listener role binding: %v", err)
}
}
return false, nil
case err != nil && !kerrors.IsNotFound(err):
return false, fmt.Errorf("failed to get listener role binding: %v", err)
}
logger.Info("Listener role binding is deleted")
listenerRole := new(rbacv1.Role)
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole)
switch {
case err == nil:
if listenerRole.ObjectMeta.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role")
if err := r.Delete(ctx, listenerRole); err != nil {
return false, fmt.Errorf("failed to delete listener role: %v", err)
}
}
return false, nil
case err != nil && !kerrors.IsNotFound(err):
return false, fmt.Errorf("failed to get listener role: %v", err)
}
logger.Info("Listener role is deleted")
logger.Info("Cleaning up the listener service account")
listenerSa := new(corev1.ServiceAccount)
err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
@@ -432,13 +386,13 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
}
}
cert := ""
if autoscalingListener.Spec.GitHubServerTLS != nil {
var err error
cert, err = r.certificate(ctx, autoscalingRunnerSet, autoscalingListener)
env, err := r.certificateEnvVarForListener(ctx, autoscalingRunnerSet, autoscalingListener)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create certificate env var for listener: %v", err)
}
envs = append(envs, env)
}
var metricsConfig *listenerMetricsServerConfig
@@ -449,35 +403,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
}
}
var podConfig corev1.Secret
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &podConfig); err != nil {
if !kerrors.IsNotFound(err) {
logger.Error(err, "Unable to get listener config secret", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerConfigName(autoscalingListener))
return ctrl.Result{Requeue: true}, err
}
logger.Info("Creating listener config secret")
podConfig, err := r.resourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
if err != nil {
logger.Error(err, "Failed to build listener config secret")
return ctrl.Result{}, err
}
if err := ctrl.SetControllerReference(autoscalingListener, podConfig, r.Scheme); err != nil {
logger.Error(err, "Failed to set controller reference")
return ctrl.Result{}, err
}
if err := r.Create(ctx, podConfig); err != nil {
logger.Error(err, "Unable to create listener config secret", "namespace", podConfig.Namespace, "name", podConfig.Name)
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, metricsConfig, envs...)
if err != nil {
logger.Error(err, "Failed to build listener pod")
return ctrl.Result{}, err
@@ -498,13 +424,13 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (string, error) {
func (r *AutoscalingListenerReconciler) certificateEnvVarForListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (corev1.EnvVar, error) {
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
return "", fmt.Errorf("githubServerTLS.certificateFrom is not specified")
return corev1.EnvVar{}, fmt.Errorf("githubServerTLS.certificateFrom is not specified")
}
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef == nil {
return "", fmt.Errorf("githubServerTLS.certificateFrom.configMapKeyRef is not specified")
return corev1.EnvVar{}, fmt.Errorf("githubServerTLS.certificateFrom.configMapKeyRef is not specified")
}
var configmap corev1.ConfigMap
@@ -517,7 +443,7 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
&configmap,
)
if err != nil {
return "", fmt.Errorf(
return corev1.EnvVar{}, fmt.Errorf(
"failed to get configmap %s: %w",
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name,
err,
@@ -526,14 +452,17 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
certificate, ok := configmap.Data[autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key]
if !ok {
return "", fmt.Errorf(
return corev1.EnvVar{}, fmt.Errorf(
"key %s is not found in configmap %s",
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key,
autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name,
)
}
return certificate, nil
return corev1.EnvVar{
Name: "GITHUB_SERVER_ROOT_CA",
Value: certificate,
}, nil
}
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
@@ -714,7 +643,7 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
return err
}
labelBasedWatchFunc := func(_ context.Context, obj client.Object) []reconcile.Request {
labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
var requests []reconcile.Request
labels := obj.GetLabels()
namespace, ok := labels["auto-scaling-listener-namespace"]
@@ -741,8 +670,8 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
For(&v1alpha1.AutoscalingListener{}).
Owns(&corev1.Pod{}).
Owns(&corev1.ServiceAccount{}).
Watches(&rbacv1.Role{}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&rbacv1.RoleBinding{}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}
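
One side of this diff stops writing the CA certificate into a listener config secret and instead injects it as the GITHUB_SERVER_ROOT_CA environment variable (see certificateEnvVarForListener above). A hedged sketch of how a listener process could consume that variable; the variable name comes from the diff, but the client construction around it is an assumption, not the project's actual listener code:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

// newHTTPClient builds an HTTP client that trusts an extra root CA when
// GITHUB_SERVER_ROOT_CA is set, mirroring the env-var approach in the diff.
func newHTTPClient() (*http.Client, error) {
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool() // fall back to an empty pool
	}
	if ca := os.Getenv("GITHUB_SERVER_ROOT_CA"); ca != "" {
		if !pool.AppendCertsFromPEM([]byte(ca)) {
			return nil, fmt.Errorf("GITHUB_SERVER_ROOT_CA does not contain a valid PEM certificate")
		}
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}, nil
}

func main() {
	client, err := newHTTPClient()
	if err != nil {
		panic(err)
	}
	_ = client // use client against the GitHub Enterprise server here
}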

View File

@@ -2,7 +2,6 @@ package actionsgithubcom
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
@@ -14,7 +13,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
listenerconfig "github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -105,19 +103,6 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Context("When creating a new AutoScalingListener", func() {
It("It should create/add all required resources for a new AutoScalingListener (finalizer, secret, service account, role, rolebinding, pod)", func() {
config := new(corev1.Secret)
Eventually(
func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: configSecret.Namespace}, config)
if err != nil {
return err
}
return nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(Succeed(), "Config secret should be created")
// Check if finalizer is added
created := new(actionsv1alpha1.AutoscalingListener)
Eventually(
@@ -218,8 +203,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
// Delete the AutoScalingListener
err := k8sClient.Delete(ctx, autoscalingListener)
@@ -241,41 +225,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
return nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(Succeed(), "failed to delete pod")
// Cleanup the listener role binding
Eventually(
func() bool {
roleBinding := new(rbacv1.RoleBinding)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
return kerrors.IsNotFound(err)
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeTrue(), "failed to delete role binding")
// Cleanup the listener role
Eventually(
func() bool {
role := new(rbacv1.Role)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
return kerrors.IsNotFound(err)
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeTrue(), "failed to delete role")
// Cleanup the listener config
Eventually(
func() bool {
config := new(corev1.Secret)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, config)
return kerrors.IsNotFound(err)
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeTrue(), "failed to delete config secret")
autoscalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete pod")
// Cleanup the listener service account
Eventually(
@@ -425,7 +375,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: autoscalingListenerContainerName,
Name: "listener",
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser,
@@ -527,17 +477,6 @@ var _ = Describe("Test AutoScalingListener customization", func() {
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
// Check if config is created
config := new(corev1.Secret)
Eventually(
func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, config)
return err
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(Succeed(), "Config secret should be created")
// Check if pod is created
pod := new(corev1.Pod)
Eventually(
@@ -555,7 +494,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Expect(pod.Spec.SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].Name).To(Equal(autoscalingListenerContainerName), "Pod should have the correct container name")
Expect(pod.Spec.Containers[0].Name).NotTo(Equal("listener"), "Pod should have the correct container name")
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
@@ -762,155 +701,6 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
})
})
var _ = Describe("Test AutoScalingListener controller with template modification", func() {
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener
createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) {
min := 1
max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
MaxRunners: &max,
MinRunners: &min,
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "runner",
Image: "ghcr.io/actions/runner",
},
},
},
},
ListenerTemplate: listenerTemplate,
},
}
err := k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asl",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
EphemeralRunnerSetName: "test-ers",
MaxRunners: 10,
MinRunners: 1,
Image: "ghcr.io/owner/repo",
Template: listenerTemplate,
},
}
err = k8sClient.Create(ctx, autoscalingListener)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener")
}
BeforeEach(func() {
ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
startManagers(GinkgoT(), mgr)
})
It("Should create listener pod with modified spec", func() {
runAsUser1001 := int64(1001)
runAsUser1000 := int64(1000)
tmpl := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"test-annotation-key": "test-annotation-value",
},
Labels: map[string]string{
"test-label-key": "test-label-value",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: autoscalingListenerContainerName,
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser1001,
},
},
{
Name: "sidecar",
ImagePullPolicy: corev1.PullIfNotPresent,
Image: "busybox",
},
},
SecurityContext: &corev1.PodSecurityContext{
RunAsUser: &runAsUser1000,
},
},
}
createRunnerSetAndListener(tmpl)
// wait for listener pod to be created
Eventually(
func(g Gomega) {
pod := new(corev1.Pod)
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace},
pod,
)
g.Expect(err).NotTo(HaveOccurred(), "failed to get pod")
g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with modified template")
// Delete the AutoScalingListener
err := k8sClient.Delete(ctx, autoscalingListener)
Expect(err).NotTo(HaveOccurred(), "failed to delete test AutoScalingListener")
Eventually(
func(g Gomega) {
var proxySecret corev1.Secret
err := k8sClient.Get(
ctx,
types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingNS.Name},
&proxySecret,
)
g.Expect(kerrors.IsNotFound(err)).To(BeTrue())
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(Succeed(), "failed to delete secret with proxy details")
})
})
var _ = Describe("Test GitHub Server TLS configuration", func() {
var ctx context.Context
var mgr ctrl.Manager
@@ -1026,26 +816,31 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
})
Context("When creating a new AutoScalingListener", func() {
It("It should set the certificates in the config of the pod", func() {
config := new(corev1.Secret)
It("It should set the certificates as an environment variable on the pod", func() {
pod := new(corev1.Pod)
Eventually(
func(g Gomega) {
err := k8sClient.Get(
ctx,
client.ObjectKey{
Name: scaleSetListenerConfigName(autoscalingListener),
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace,
},
config,
pod,
)
g.Expect(err).NotTo(HaveOccurred(), "failed to get pod")
g.Expect(pod.Spec.Containers).NotTo(BeEmpty(), "pod should have containers")
g.Expect(pod.Spec.Containers[0].Env).NotTo(BeEmpty(), "pod should have env variables")
g.Expect(config.Data["config.json"]).ToNot(BeEmpty(), "listener configuration file should not be empty")
var listenerConfig listenerconfig.Config
err = json.Unmarshal(config.Data["config.json"], &listenerConfig)
g.Expect(err).NotTo(HaveOccurred(), "failed to parse listener configuration file")
var env *corev1.EnvVar
for _, e := range pod.Spec.Containers[0].Env {
if e.Name == "GITHUB_SERVER_ROOT_CA" {
env = &e
break
}
}
g.Expect(env).NotTo(BeNil(), "pod should have an env variable named GITHUB_SERVER_ROOT_CA")
cert, err := os.ReadFile(filepath.Join(
"../../",
@@ -1056,7 +851,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
))
g.Expect(err).NotTo(HaveOccurred(), "failed to read rootCA.crt")
g.Expect(listenerConfig.ServerRootCA).To(
g.Expect(env.Value).To(
BeEquivalentTo(string(cert)),
"GITHUB_SERVER_ROOT_CA should be the rootCA.crt",
)
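
The rewritten assertion above walks pod.Spec.Containers[0].Env by hand to find GITHUB_SERVER_ROOT_CA. A small helper along these lines (hypothetical, not part of the repo) keeps that lookup out of the test body:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// envVarValue returns the value of the named env var on a container and
// whether it was found; ValueFrom-style variables are ignored for brevity.
func envVarValue(c corev1.Container, name string) (string, bool) {
	for _, e := range c.Env {
		if e.Name == name {
			return e.Value, true
		}
	}
	return "", false
}

func main() {
	c := corev1.Container{Env: []corev1.EnvVar{{Name: "GITHUB_SERVER_ROOT_CA", Value: "PEM..."}}}
	if v, ok := envVarValue(c, "GITHUB_SERVER_ROOT_CA"); ok {
		fmt.Println("found:", v)
	}
}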

View File

@@ -39,17 +39,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
annotationKeyRunnerSpecHash = "actions.github.com/runner-spec-hash"
// annotationKeyValuesHash is hash of the entire values json.
// This is used to determine if the values have changed, so we can
// re-create listener.
annotationKeyValuesHash = "actions.github.com/values-hash"
labelKeyRunnerSpecHash = "runner-spec-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
)
type UpdateStrategy string
@@ -209,7 +206,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}
// Make sure the runner scale set name is up to date
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
@@ -235,8 +232,9 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
}
desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
for _, runnerSet := range existingRunnerSets.all() {
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Annotations[annotationKeyRunnerSpecHash])
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
}
// Make sure the AutoscalingListener is up and running in the controller namespace
@@ -253,9 +251,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}
// Our listener pod is out of date, so we need to delete it to get a new recreate.
listenerValuesHashChanged := listener.Annotations[annotationKeyValuesHash] != autoscalingRunnerSet.Annotations[annotationKeyValuesHash]
listenerSpecHashChanged := listener.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()
if listenerFound && (listenerValuesHashChanged || listenerSpecHashChanged) {
if listenerFound && (listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()) {
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
if err := r.Delete(ctx, listener); err != nil {
if kerrors.IsNotFound(err) {
@@ -269,7 +265,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}
if latestRunnerSet.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.RunnerSetSpecHash() {
if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
if r.drainingJobs(&latestRunnerSet.Status) {
log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
log.Info("Scaling down the number of desired replicas to 0")
@@ -471,8 +467,6 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: runnerScaleSet.Id,
HasProxy: autoscalingRunnerSet.Spec.Proxy != nil,
Subsystem: "controller",
})
logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id, "runnerGroupName", runnerScaleSet.RunnerGroupName)
@@ -485,7 +479,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")
if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
@@ -533,10 +527,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}
logger.Info("Updating runner scale set name and runner group name as annotations")
logger.Info("Updating runner scale set runner group name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName
obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
}); err != nil {
logger.Error(err, "Failed to update runner group name annotation")
return ctrl.Result{}, err
@@ -572,7 +565,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
logger.Info("Updating runner scale set name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name
}); err != nil {
logger.Error(err, "Failed to update runner scale set name annotation")
return ctrl.Result{}, err
@@ -781,8 +774,8 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingRunnerSet{}).
Owns(&v1alpha1.EphemeralRunnerSet{}).
Watches(&v1alpha1.AutoscalingListener{}, handler.EnqueueRequestsFromMapFunc(
func(_ context.Context, o client.Object) []reconcile.Request {
Watches(&source.Kind{Type: &v1alpha1.AutoscalingListener{}}, handler.EnqueueRequestsFromMapFunc(
func(o client.Object) []reconcile.Request {
autoscalingListener := o.(*v1alpha1.AutoscalingListener)
return []reconcile.Request{
{
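
Both controller files show the same mechanical change to SetupWithManager: one side passes the watched object directly to Watches and uses a map function that receives a context.Context, while the other wraps the type in &source.Kind{...} and omits the context. This tracks a controller-runtime API change between releases. A hedged sketch of the newer, context-aware shape, with the older form left as a comment; the exact version boundary is an assumption:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// addPodWatch registers a watch in the newer controller-runtime style, where
// Watches takes the object itself and the map func receives a context.
func addPodWatch(b *ctrl.Builder) *ctrl.Builder {
	mapFn := func(_ context.Context, o client.Object) []reconcile.Request {
		return []reconcile.Request{} // derive requests from o's labels here
	}
	return b.Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(mapFn))
	// Older controller-runtime equivalent, as on the other side of the diff:
	//   b.Watches(&source.Kind{Type: &corev1.Pod{}},
	//       handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { ... }))
}

func main() {}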

View File

@@ -280,10 +280,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// This should trigger re-creation of EphemeralRunnerSet and Listener
patched := autoscalingRunnerSet.DeepCopy()
patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy()
@@ -301,10 +297,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
}
return runnerSetList.Items[0].Annotations[annotationKeyRunnerSpecHash], nil
return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Annotations[annotationKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
// We should create a new listener
Eventually(
@@ -338,55 +334,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
// We should not re-create a new EphemeralRunnerSet
Consistently(
func() (string, error) {
runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
if err != nil {
return "", err
}
if len(runnerSetList.Items) != 1 {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
}
return string(runnerSetList.Items[0].UID), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created")
// We should only re-create a new listener
Eventually(
func() (string, error) {
listener := new(v1alpha1.AutoscalingListener)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
if err != nil {
return "", err
}
return string(listener.UID), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created")
// Only update the values hash for the autoscaling runner set
// This should trigger re-creation of the Listener only
runnerSetList = new(v1alpha1.EphemeralRunnerSetList)
err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet")
runnerSet = runnerSetList.Items[0]
listener = new(v1alpha1.AutoscalingListener)
err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
patched = autoscalingRunnerSet.DeepCopy()
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
// We should not re-create a new EphemeralRunnerSet
Consistently(
func() (string, error) {
@@ -546,10 +493,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// Patch the AutoScalingRunnerSet image which should trigger
// the recreation of the Listener and EphemeralRunnerSet
patched := autoscalingRunnerSet.DeepCopy()
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Spec.Template.Spec = corev1.PodSpec{
Containers: []corev1.Container{
{
@@ -558,6 +501,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
},
},
}
// patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy()
@@ -754,7 +698,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err
}
if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
return val, nil
}
@@ -778,7 +722,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err
}
if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
return val, nil
}
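
A recurring rename in these hunks is the runner spec hash moving between an annotation (actions.github.com/runner-spec-hash) and a plain label (runner-spec-hash); either way the mechanism is the same: hash the desired spec, stamp it on the child resource, and recreate when the stamps disagree. A minimal sketch of that comparison, assuming FNV hashing over the JSON-encoded spec (the project's actual hash function may differ):

package main

import (
	"encoding/json"
	"fmt"
	"hash/fnv"

	corev1 "k8s.io/api/core/v1"
)

// specHash returns a short, deterministic hash of a pod spec, suitable for
// storing in a label or annotation and comparing across reconciles.
func specHash(spec corev1.PodSpec) string {
	raw, _ := json.Marshal(spec) // marshaling a PodSpec cannot fail here
	h := fnv.New32a()
	_, _ = h.Write(raw)
	return fmt.Sprint(h.Sum32())
}

func main() {
	desired := corev1.PodSpec{Containers: []corev1.Container{{Name: "runner", Image: "ghcr.io/actions/runner:old"}}}
	current := specHash(desired)

	desired.Containers[0].Image = "ghcr.io/actions/runner:new"
	if specHash(desired) != current {
		fmt.Println("spec hash changed: recreate the EphemeralRunnerSet and listener")
	}
}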

View File

@@ -2,6 +2,7 @@ package actionsgithubcom
import (
"github.com/actions/actions-runner-controller/logging"
corev1 "k8s.io/api/core/v1"
)
const (
@@ -39,11 +40,7 @@ const (
// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
const (
AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
AnnotationKeyGitHubRunnerScaleSetName = "actions.github.com/runner-scale-set-name"
AnnotationKeyPatchID = "actions.github.com/patch-id"
)
const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
// Labels applied to listener roles
const (
@@ -62,6 +59,10 @@ const (
AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)
// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
// to the listener when ImagePullPolicy is not specified
const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent
// DefaultScaleSetListenerLogLevel is the default log level applied
const DefaultScaleSetListenerLogLevel = string(logging.LogLevelDebug)
@@ -70,9 +71,3 @@ const DefaultScaleSetListenerLogFormat = string(logging.LogFormatText)
// ownerKey is field selector matching the owner name of a particular resource
const resourceOwnerKey = ".metadata.controller"
// EphemeralRunner pod creation failure reasons
const (
ReasonTooManyPodFailures = "TooManyPodFailures"
ReasonInvalidPodFailure = "InvalidPod"
)

View File

@@ -133,23 +133,6 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if ephemeralRunner.IsDone() {
log.Info("Cleaning up resources after after ephemeral runner termination", "phase", ephemeralRunner.Status.Phase)
done, err := r.cleanupResources(ctx, ephemeralRunner, log)
if err != nil {
log.Error(err, "Failed to clean up ephemeral runner owned resources")
return ctrl.Result{}, err
}
if !done {
log.Info("Waiting for ephemeral runner owned resources to be deleted")
return ctrl.Result{Requeue: true}, nil
}
// Stop reconciling on this object.
// The EphemeralRunnerSet is responsible for cleaning it up.
log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase)
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
@@ -176,6 +159,13 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if ephemeralRunner.Status.Phase == corev1.PodSucceeded || ephemeralRunner.Status.Phase == corev1.PodFailed {
// Stop reconciling on this object.
// The EphemeralRunnerSet is responsible for cleaning it up.
log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase)
return ctrl.Result{}, nil
}
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Creating new ephemeral runner registration and updating status with runner config")
return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
@@ -201,8 +191,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
case len(ephemeralRunner.Status.Failures) > 5:
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonTooManyPodFailures, log); err != nil {
if err := r.markAsFailed(ctx, ephemeralRunner, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
@@ -211,22 +200,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
default:
// Pod was not found. Create if the pod has never been created
log.Info("Creating new EphemeralRunner pod.")
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
switch {
case err == nil:
return result, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
default:
log.Error(err, "Failed to create the pod")
return ctrl.Result{}, err
}
return r.createPod(ctx, ephemeralRunner, secret, log)
}
}
@@ -334,7 +308,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
}
}
return false, nil
case !kerrors.IsNotFound(err):
case err != nil && !kerrors.IsNotFound(err):
return false, err
}
log.Info("Pod is deleted")
@@ -351,7 +325,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
}
}
return false, nil
case !kerrors.IsNotFound(err):
case err != nil && !kerrors.IsNotFound(err):
return false, err
}
log.Info("Secret is deleted")
@@ -449,12 +423,12 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
return false, multierr.Combine(errs...)
}
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Failed")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed
obj.Status.Reason = reason
obj.Status.Message = errMessage
obj.Status.Reason = "TooManyPodFailures"
obj.Status.Message = "Pod has failed to start more than 5 times"
}); err != nil {
return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err)
}
@@ -685,7 +659,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
return nil
}
log.Info("Updating ephemeral runner status with pod phase", "statusPhase", pod.Status.Phase, "statusReason", pod.Status.Reason, "statusMessage", pod.Status.Message)
log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message)
err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = pod.Status.Phase
obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning)
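
The parameterized markAsFailed above is what lets the two failure paths report distinct reasons: TooManyPodFailures after five pod failures, and InvalidPod when pod creation is rejected outright. A hedged sketch of the status shape being patched, with a simplified stand-in for v1alpha1.EphemeralRunnerStatus rather than the repo's patch helpers:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// runnerStatus is a simplified stand-in for v1alpha1.EphemeralRunnerStatus.
type runnerStatus struct {
	Phase   corev1.PodPhase
	Reason  string
	Message string
}

// markAsFailed mirrors the parameterized form in the diff: the caller decides
// whether this is TooManyPodFailures or InvalidPod and supplies the message.
func markAsFailed(st *runnerStatus, reason, message string) {
	st.Phase = corev1.PodFailed
	st.Reason = reason
	st.Message = message
}

func main() {
	var st runnerStatus
	markAsFailed(&st, "InvalidPod", "Failed to create the pod: forbidden")
	fmt.Printf("%+v\n", st)
}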

View File

@@ -189,25 +189,6 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(true))
})
It("It should failed if a pod template is invalid", func() {
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
err := k8sClient.Create(ctx, invalideEphemeralRunner)
Expect(err).To(BeNil())
updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (corev1.PodPhase, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace}, updated)
if err != nil {
return "", nil
}
return updated.Status.Phase, nil
}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
})
It("It should clean up resources when deleted", func() {
// wait for pod to be created
pod := new(corev1.Pod)

View File

@@ -22,7 +22,6 @@ import (
"fmt"
"net/http"
"sort"
"strconv"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -157,14 +156,14 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, err
}
ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)
pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList)
log.Info("Ephemeral runner counts",
"pending", len(ephemeralRunnerState.pending),
"running", len(ephemeralRunnerState.running),
"finished", len(ephemeralRunnerState.finished),
"failed", len(ephemeralRunnerState.failed),
"deleting", len(ephemeralRunnerState.deleting),
"pending", len(pendingEphemeralRunners),
"running", len(runningEphemeralRunners),
"finished", len(finishedEphemeralRunners),
"failed", len(failedEphemeralRunners),
"deleting", len(deletingEphemeralRunners),
)
if r.PublishMetrics {
@@ -184,52 +183,54 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
Organization: parsedURL.Organization,
Enterprise: parsedURL.Enterprise,
},
len(ephemeralRunnerState.pending),
len(ephemeralRunnerState.running),
len(ephemeralRunnerState.failed),
len(pendingEphemeralRunners),
len(runningEphemeralRunners),
len(failedEphemeralRunners),
)
}
total := ephemeralRunnerState.scaleTotal()
if ephemeralRunnerSet.Spec.PatchID == 0 || ephemeralRunnerSet.Spec.PatchID != ephemeralRunnerState.latestPatchID {
defer func() {
if err := r.cleanupFinishedEphemeralRunners(ctx, ephemeralRunnerState.finished, log); err != nil {
log.Error(err, "failed to cleanup finished ephemeral runners")
// cleanup finished runners and proceed
var errs []error
for i := range finishedEphemeralRunners {
log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name)
if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil {
if !kerrors.IsNotFound(err) {
errs = append(errs, err)
}
}()
}
}
log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas)
switch {
case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up
count := ephemeralRunnerSet.Spec.Replicas - total
log.Info("Creating new ephemeral runners (scale up)", "count", count)
if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil {
log.Error(err, "failed to make ephemeral runner")
return ctrl.Result{}, err
}
if len(errs) > 0 {
mergedErrs := multierr.Combine(errs...)
log.Error(mergedErrs, "Failed to delete finished ephemeral runners")
return ctrl.Result{}, mergedErrs
}
case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario.
count := total - ephemeralRunnerSet.Spec.Replicas
log.Info("Deleting ephemeral runners (scale down)", "count", count)
if err := r.deleteIdleEphemeralRunners(
ctx,
ephemeralRunnerSet,
ephemeralRunnerState.pending,
ephemeralRunnerState.running,
count,
log,
); err != nil {
log.Error(err, "failed to delete idle runners")
return ctrl.Result{}, err
}
total := len(pendingEphemeralRunners) + len(runningEphemeralRunners) + len(failedEphemeralRunners)
log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas)
switch {
case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up
count := ephemeralRunnerSet.Spec.Replicas - total
log.Info("Creating new ephemeral runners (scale up)", "count", count)
if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil {
log.Error(err, "failed to make ephemeral runner")
return ctrl.Result{}, err
}
case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario.
count := total - ephemeralRunnerSet.Spec.Replicas
log.Info("Deleting ephemeral runners (scale down)", "count", count)
if err := r.deleteIdleEphemeralRunners(ctx, ephemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners, count, log); err != nil {
log.Error(err, "failed to delete idle runners")
return ctrl.Result{}, err
}
}
desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
CurrentReplicas: total,
PendingEphemeralRunners: len(ephemeralRunnerState.pending),
RunningEphemeralRunners: len(ephemeralRunnerState.running),
FailedEphemeralRunners: len(ephemeralRunnerState.failed),
PendingEphemeralRunners: len(pendingEphemeralRunners),
RunningEphemeralRunners: len(runningEphemeralRunners),
FailedEphemeralRunners: len(failedEphemeralRunners),
}
// Update the status if needed.
@@ -246,21 +247,6 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, nil
}
func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
// cleanup finished runners and proceed
var errs []error
for i := range finishedEphemeralRunners {
log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name)
if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil {
if !kerrors.IsNotFound(err) {
errs = append(errs, err)
}
}
}
return multierr.Combine(errs...)
}
func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil {
return nil
@@ -298,19 +284,19 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
return true, nil
}
ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)
pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList)
log.Info("Clean up runner counts",
"pending", len(ephemeralRunnerState.pending),
"running", len(ephemeralRunnerState.running),
"finished", len(ephemeralRunnerState.finished),
"failed", len(ephemeralRunnerState.failed),
"deleting", len(ephemeralRunnerState.deleting),
"pending", len(pendingEphemeralRunners),
"running", len(runningEphemeralRunners),
"finished", len(finishedEphemeralRunners),
"failed", len(failedEphemeralRunners),
"deleting", len(deletingEphemeralRunners),
)
log.Info("Cleanup finished or failed ephemeral runners")
var errs []error
for _, ephemeralRunner := range append(ephemeralRunnerState.finished, ephemeralRunnerState.failed...) {
for _, ephemeralRunner := range append(finishedEphemeralRunners, failedEphemeralRunners...) {
log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name)
if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
errs = append(errs, err)
@@ -324,7 +310,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
}
// avoid fetching the client if we have nothing left to do
if len(ephemeralRunnerState.running) == 0 && len(ephemeralRunnerState.pending) == 0 {
if len(runningEphemeralRunners) == 0 && len(pendingEphemeralRunners) == 0 {
return false, nil
}
@@ -335,7 +321,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
log.Info("Cleanup pending or running ephemeral runners")
errs = errs[0:0]
for _, ephemeralRunner := range append(ephemeralRunnerState.pending, ephemeralRunnerState.running...) {
for _, ephemeralRunner := range append(pendingEphemeralRunners, runningEphemeralRunners...) {
log.Info("Removing the ephemeral runner from the service", "name", ephemeralRunner.Name)
_, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log)
if err != nil {
@@ -441,13 +427,12 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
deletedCount := 0
for runners.next() {
ephemeralRunner := runners.object()
isDone := ephemeralRunner.IsDone()
if !isDone && ephemeralRunner.Status.RunnerId == 0 {
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Skipping ephemeral runner since it is not registered yet", "name", ephemeralRunner.Name)
continue
}
if !isDone && ephemeralRunner.Status.JobRequestId > 0 {
if ephemeralRunner.Status.JobRequestId > 0 {
log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId)
continue
}
@@ -595,22 +580,16 @@ type ephemeralRunnerStepper struct {
index int
}
func newEphemeralRunnerStepper(primary []*v1alpha1.EphemeralRunner, othersOrdered ...[]*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper {
sort.Slice(primary, func(i, j int) bool {
return primary[i].GetCreationTimestamp().Time.Before(primary[j].GetCreationTimestamp().Time)
func newEphemeralRunnerStepper(pending, running []*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper {
sort.Slice(pending, func(i, j int) bool {
return pending[i].GetCreationTimestamp().Time.Before(pending[j].GetCreationTimestamp().Time)
})
sort.Slice(running, func(i, j int) bool {
return running[i].GetCreationTimestamp().Time.Before(running[j].GetCreationTimestamp().Time)
})
for _, bucket := range othersOrdered {
sort.Slice(bucket, func(i, j int) bool {
return bucket[i].GetCreationTimestamp().Time.Before(bucket[j].GetCreationTimestamp().Time)
})
}
for _, bucket := range othersOrdered {
primary = append(primary, bucket...)
}
return &ephemeralRunnerStepper{
items: primary,
items: append(pending, running...),
index: -1,
}
}
@@ -634,48 +613,28 @@ func (s *ephemeralRunnerStepper) len() int {
return len(s.items)
}
type ephemeralRunnerState struct {
pending []*v1alpha1.EphemeralRunner
running []*v1alpha1.EphemeralRunner
finished []*v1alpha1.EphemeralRunner
failed []*v1alpha1.EphemeralRunner
deleting []*v1alpha1.EphemeralRunner
latestPatchID int
}
func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) *ephemeralRunnerState {
var ephemeralRunnerState ephemeralRunnerState
func categorizeEphemeralRunners(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) (pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners []*v1alpha1.EphemeralRunner) {
for i := range ephemeralRunnerList.Items {
r := &ephemeralRunnerList.Items[i]
patchID, err := strconv.Atoi(r.Annotations[AnnotationKeyPatchID])
if err == nil && patchID > ephemeralRunnerState.latestPatchID {
ephemeralRunnerState.latestPatchID = patchID
}
if !r.ObjectMeta.DeletionTimestamp.IsZero() {
ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
deletingEphemeralRunners = append(deletingEphemeralRunners, r)
continue
}
switch r.Status.Phase {
case corev1.PodRunning:
ephemeralRunnerState.running = append(ephemeralRunnerState.running, r)
runningEphemeralRunners = append(runningEphemeralRunners, r)
case corev1.PodSucceeded:
ephemeralRunnerState.finished = append(ephemeralRunnerState.finished, r)
finishedEphemeralRunners = append(finishedEphemeralRunners, r)
case corev1.PodFailed:
ephemeralRunnerState.failed = append(ephemeralRunnerState.failed, r)
failedEphemeralRunners = append(failedEphemeralRunners, r)
default:
// Pending or no phase should be considered as pending.
//
// If field is not set, that means that the EphemeralRunner
// did not yet have chance to update the Status.Phase field.
ephemeralRunnerState.pending = append(ephemeralRunnerState.pending, r)
pendingEphemeralRunners = append(pendingEphemeralRunners, r)
}
}
return &ephemeralRunnerState
}
func (s *ephemeralRunnerState) scaleTotal() int {
return len(s.pending) + len(s.running) + len(s.failed)
return
}
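
For orientation: categorizeEphemeralRunners returns five slices, while the other side of the diff folds the same buckets, plus the highest patch ID seen on any runner, into an ephemeralRunnerState with a scaleTotal() helper, and only rescales when Spec.PatchID differs from that latest ID. A hedged sketch of the struct-based shape, with a trimmed-down runner type standing in for v1alpha1.EphemeralRunner:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// runner is a trimmed stand-in for v1alpha1.EphemeralRunner.
type runner struct {
	Name     string
	Phase    corev1.PodPhase
	Deleting bool
	PatchID  int // parsed from the actions.github.com/patch-id annotation
}

type ephemeralRunnerState struct {
	pending, running, finished, failed, deleting []*runner
	latestPatchID                                int
}

func newEphemeralRunnerState(items []*runner) *ephemeralRunnerState {
	var s ephemeralRunnerState
	for _, r := range items {
		if r.PatchID > s.latestPatchID {
			s.latestPatchID = r.PatchID
		}
		switch {
		case r.Deleting:
			s.deleting = append(s.deleting, r)
		case r.Phase == corev1.PodRunning:
			s.running = append(s.running, r)
		case r.Phase == corev1.PodSucceeded:
			s.finished = append(s.finished, r)
		case r.Phase == corev1.PodFailed:
			s.failed = append(s.failed, r)
		default: // no phase yet counts as pending
			s.pending = append(s.pending, r)
		}
	}
	return &s
}

// scaleTotal counts the runners occupying a replica slot: pending, running,
// and failed (failed runners block scale-down until cleaned up separately).
func (s *ephemeralRunnerState) scaleTotal() int {
	return len(s.pending) + len(s.running) + len(s.failed)
}

func main() {
	s := newEphemeralRunnerState([]*runner{
		{Name: "a", Phase: corev1.PodRunning, PatchID: 2},
		{Name: "b"}, // pending
		{Name: "c", Phase: corev1.PodSucceeded, PatchID: 1},
	})
	fmt.Println(s.scaleTotal(), s.latestPatchID) // 2 2: finished runners are excluded
}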

View File

@@ -4,7 +4,6 @@ import (
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"net/http"
"net/http/httptest"
@@ -275,17 +274,14 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
Context("When a new EphemeralRunnerSet scale up and down", func() {
It("Should scale only on patch ID change", func() {
It("It should delete finished EphemeralRunner and create new EphemeralRunner", func() {
created := new(actionsv1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
patchID := 1
// Scale up the EphemeralRunnerSet
updated := created.DeepCopy()
updated.Spec.Replicas = 5
updated.Spec.PatchID = patchID
err = k8sClient.Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
@@ -321,8 +317,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created")
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created")
// Mark one of the EphemeralRunner as finished
finishedRunner := runnerList.Items[4].DeepCopy()
@@ -330,7 +325,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Status().Patch(ctx, finishedRunner, client.MergeFrom(&runnerList.Items[4]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Wait for the finished EphemeralRunner to be set to succeeded
// Wait for the finished EphemeralRunner to be deleted
Eventually(
func() error {
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
@@ -340,35 +335,17 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
for _, runner := range runnerList.Items {
if runner.Name != finishedRunner.Name {
continue
if runner.Name == finishedRunner.Name {
return fmt.Errorf("EphemeralRunner is not deleted")
}
if runner.Status.Phase != corev1.PodSucceeded {
return fmt.Errorf("EphemeralRunner is not finished")
}
// found pod succeeded
return nil
}
return errors.New("Finished ephemeral runner is not found")
return nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(Succeed(), "Finished EphemeralRunner should be deleted")
ephemeralRunnerSetTestInterval).Should(Succeed(), "Finished EphemeralRunner should be deleted")
// After one ephemeral runner is finished, simulate job done patch
patchID++
original := new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updated = original.DeepCopy()
updated.Spec.PatchID = patchID
updated.Spec.Replicas = 4
err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Only finished ephemeral runner should be deleted
// We should still have the EphemeralRunnerSet scale up
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
@@ -377,27 +354,35 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return -1, err
}
for _, runner := range runnerList.Items {
if runner.Status.Phase == corev1.PodSucceeded {
return -1, fmt.Errorf("Finished EphemeralRunner should be deleted")
// Set status to simulate a configured EphemeralRunner
refetch := false
for i, runner := range runnerList.Items {
if runner.Status.RunnerId == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.RunnerId = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
refetch = true
}
}
if refetch {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
}
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(4), "4 EphemeralRunner should be created")
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created")
// Scaling down the EphemeralRunnerSet
patchID++
original = new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updated = original.DeepCopy()
updated.Spec.PatchID = patchID
// Scale down the EphemeralRunnerSet
updated = created.DeepCopy()
updated.Spec.Replicas = 3
err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Wait for the EphemeralRunnerSet to be scaled down
@@ -432,8 +417,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(3), "3 EphemeralRunner should be created")
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(3), "3 EphemeralRunner should be created")
// We will not scale down runner that is running jobs
runningRunner := runnerList.Items[0].DeepCopy()
@@ -446,15 +430,10 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Scale down to 1 while 2 are running
patchID++
original = new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updated = original.DeepCopy()
updated.Spec.PatchID = patchID
// Scale down to 1
updated = created.DeepCopy()
updated.Spec.Replicas = 1
err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Wait for the EphemeralRunnerSet to be scaled down to 2 since we still have 2 runner running jobs
@@ -489,8 +468,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
// We will not scale down failed runner
failedRunner := runnerList.Items[0].DeepCopy()
@@ -498,8 +476,15 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Status().Patch(ctx, failedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Scale down to 0
updated = created.DeepCopy()
updated.Spec.Replicas = 0
err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// We should not scale down the EphemeralRunnerSet since we still have 1 runner running job and 1 failed runner
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
Consistently(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
@@ -529,8 +514,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
// We will scale down to 0 when the running job is completed and the failed runner is deleted
runningRunner = runnerList.Items[1].DeepCopy()
@@ -541,17 +525,6 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Delete(ctx, &runnerList.Items[0])
Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunner")
// Scale down to 0 while 1 ephemeral runner is failed
patchID++
original = new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updated = original.DeepCopy()
updated.Spec.PatchID = patchID
updated.Spec.Replicas = 0
err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Wait for the EphemeralRunnerSet to be scaled down to 0
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
@@ -584,8 +557,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created")
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created")
})
It("Should update status on Ephemeral Runner state changes", func() {

View File

@@ -9,7 +9,6 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
@@ -46,11 +45,8 @@ func createNamespace(t ginkgo.GinkgoTInterface, client client.Client) (*corev1.N
})
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Cache: cache.Options{
DefaultNamespaces: map[string]cache.Config{
ns.Name: {},
},
},
Namespace: ns.Name,
MetricsBindAddress: "0",
})
require.NoError(t, err)
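
The only substantive difference in this helper is how the test manager is scoped to the freshly created namespace: newer controller-runtime releases moved the single Namespace option into cache.Options.DefaultNamespaces. A hedged sketch of the newer shape (cfg stands for the envtest *rest.Config; the exact version boundary is an assumption):

package main

import (
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// newScopedManager restricts the manager's cache to a single namespace using
// the newer controller-runtime cache options; older releases used the
// ctrl.Options{Namespace: ns} field shown on the other side of this diff.
func newScopedManager(cfg *rest.Config, ns string) (ctrl.Manager, error) {
	return ctrl.NewManager(cfg, ctrl.Options{
		Cache: cache.Options{
			DefaultNamespaces: map[string]cache.Config{ns: {}},
		},
	})
}

func main() {}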

Some files were not shown because too many files have changed in this diff