Compare commits

..

13 Commits

Author SHA1 Message Date
Bassem Dghaidi
3b1fe11d3d Prevent releases on wrong tag 2023-03-14 12:56:45 +00:00
Bassem Dghaidi
9c3fce4727 Prevent releases on wrong tag 2023-03-14 12:52:38 +00:00
Bassem Dghaidi
ee3bf67544 Prevent releases on wrong tag 2023-03-14 12:51:12 +00:00
Bassem Dghaidi
29ca49037a Prevent releases on wrong tag 2023-03-14 12:47:40 +00:00
Bassem Dghaidi
3e37a29c21 Prevent releases on wrong tag 2023-03-14 12:42:22 +00:00
Tingluo Huang
bd9f32e354 Create separate chart validation workflow for gha-* charts. (#2393)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-03-13 12:44:54 -04:00
Nikola Jokic
babbfc77d5 Surface EphemeralRunnerSet stats to AutoscalingRunnerSet (#2382) 2023-03-13 16:16:28 +01:00
Bassem Dghaidi
322df79617 Delete renovate.json5 (#2397) 2023-03-13 08:39:07 -04:00
Bassem Dghaidi
1c7c6639ed Fix wrong file name in the workflow (#2394) 2023-03-13 06:56:21 -04:00
Hamish Forbes
bcaac39a2e feat(actionsmetrics): Add owner and workflow_name labels to workflow job metrics (#2225) 2023-03-13 10:50:36 +09:00
Milas Bowman
af625dd1cb Upgrade to Docker Engine v20.10.23 (#2328)
Co-authored-by: Yusuke Kuoka <ykuoka@gmail.com>
2023-03-13 10:29:40 +09:00
Bassem Dghaidi
44969659df Add upgrade steps (#2392)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-03-10 12:14:00 -05:00
Nikola Jokic
a5f98dea75 Refactor main.go and introduce make run-scaleset to be able to run manager locally (#2337) 2023-03-10 18:05:51 +01:00
36 changed files with 709 additions and 224 deletions

View File

@@ -1,43 +0,0 @@
{
"extends": ["config:base"],
"labels": ["dependencies"],
"packageRules": [
{
// automatically merge an update of runner
"matchPackageNames": ["actions/runner"],
"extractVersion": "^v(?<version>.*)$",
"automerge": true
}
],
"regexManagers": [
{
// use https://github.com/actions/runner/releases
"fileMatch": [
".github/workflows/runners.yaml"
],
"matchStrings": ["RUNNER_VERSION: +(?<currentValue>.*?)\\n"],
"depNameTemplate": "actions/runner",
"datasourceTemplate": "github-releases"
},
{
"fileMatch": [
"runner/Makefile",
"Makefile"
],
"matchStrings": ["RUNNER_VERSION \\?= +(?<currentValue>.*?)\\n"],
"depNameTemplate": "actions/runner",
"datasourceTemplate": "github-releases"
},
{
"fileMatch": [
"runner/actions-runner.ubuntu-20.04.dockerfile",
"runner/actions-runner.ubuntu-22.04.dockerfile",
"runner/actions-runner-dind.ubuntu-20.04.dockerfile",
"runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile"
],
"matchStrings": ["RUNNER_VERSION=+(?<currentValue>.*?)\\n"],
"depNameTemplate": "actions/runner",
"datasourceTemplate": "github-releases"
}
]
}

View File

@@ -29,7 +29,14 @@ jobs:
release-controller:
name: Release
runs-on: ubuntu-latest
if: ${{ !contains(github.event.release.name, 'gha-runner-scale-set-') }}
steps:
- name: Debug
run: |
echo "${{ github.event.release.name }}"
echo "${{ github.event.release.tag_name }}"
echo "${{ github.event }}"
- name: Checkout
uses: actions/checkout@v3

View File

@@ -17,7 +17,7 @@ env:
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_WORKFLOW: release-runners.yaml
DOCKER_VERSION: 20.10.21
DOCKER_VERSION: 20.10.23
RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0
jobs:

View File

@@ -93,7 +93,7 @@ jobs:
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e_test_linux_vm.yaml
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e-test-linux-vm.yaml
- name: Commit changes
run: |

View File

@@ -9,12 +9,16 @@ on:
- '.github/workflows/validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
push:
paths:
- 'charts/**'
- '.github/workflows/validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.10.0

View File

@@ -0,0 +1,134 @@
name: Validate Helm Chart (gha-runner-scale-set-controller and gha-runner-scale-set)
on:
pull_request:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/validate-gha-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
push:
paths:
- 'charts/**'
- '.github/workflows/validate-gha-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.16.1
HELM_VERSION: v3.8.0
permissions:
contents: read
jobs:
validate-chart:
name: Lint Chart
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
with:
python-version: '3.7'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.3.1
- name: Set up latest version chart-testing
run: |
echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
sudo apt update
sudo apt install goreleaser
git clone https://github.com/helm/chart-testing
cd chart-testing
unset CT_CONFIG_DIR
goreleaser build --clean --skip-validate
./dist/chart-testing_linux_amd64_v1/ct version
echo 'Adding ct directory to PATH...'
echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
echo 'Setting CT_CONFIG_DIR...'
echo "CT_CONFIG_DIR=$RUNNER_TEMP/chart-testing/etc" >> "$GITHUB_ENV"
working-directory: ${{ runner.temp }}
- name: Run chart-testing (list-changed)
id: list-changed
run: |
ct version
changed=$(ct list-changed --config charts/.ci/ct-config-gha.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config-gha.yaml
- name: Set up docker buildx
uses: docker/setup-buildx-action@v2
if: steps.list-changed.outputs.changed == 'true'
with:
version: latest
- name: Build controller image
uses: docker/build-push-action@v3
if: steps.list-changed.outputs.changed == 'true'
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=test-arc
VERSION=dev
tags: |
test-arc:dev
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create kind cluster
uses: helm/kind-action@v1.4.0
if: steps.list-changed.outputs.changed == 'true'
with:
cluster_name: chart-testing
- name: Load image into cluster
if: steps.list-changed.outputs.changed == 'true'
run: |
export DOCKER_IMAGE_NAME=test-arc
export VERSION=dev
export IMG_RESULT=load
make docker-buildx
kind load docker-image test-arc:dev --name chart-testing
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: |
ct install --config charts/.ci/ct-config-gha.yaml

View File

@@ -92,9 +92,14 @@ manager: generate fmt vet
run: generate fmt vet manifests
go run ./main.go
run-scaleset: generate fmt vet
CONTROLLER_MANAGER_POD_NAMESPACE=default \
CONTROLLER_MANAGER_CONTAINER_IMAGE="${DOCKER_IMAGE_NAME}:${VERSION}" \
go run ./main.go --auto-scaling-runner-set-only
# Install CRDs into a cluster
install: manifests
kustomize build config/crd | kubectl apply -f -
kustomize build config/crd | kubectl apply --server-side -f -
# Uninstall CRDs from a cluster
uninstall: manifests
@@ -103,7 +108,7 @@ uninstall: manifests
# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
deploy: manifests
cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION}
kustomize build config/default | kubectl apply -f -
kustomize build config/default | kubectl apply --server-side -f -
# Generate manifests e.g. CRD, RBAC etc.
manifests: manifests-gen-crds chart-crds

View File

@@ -33,10 +33,14 @@ import (
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=number
//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=number
//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=number
//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string
//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
// AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API
type AutoscalingRunnerSet struct {
@@ -228,10 +232,19 @@ type ProxyServerConfig struct {
// AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
type AutoscalingRunnerSetStatus struct {
// +optional
CurrentRunners int `json:"currentRunners,omitempty"`
CurrentRunners int `json:"currentRunners"`
// +optional
State string `json:"state,omitempty"`
State string `json:"state"`
// EphemeralRunner counts separated by the stage ephemeral runners are in, taken from the EphemeralRunnerSet
//+optional
PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
// +optional
RunningEphemeralRunners int `json:"runningEphemeralRunners"`
// +optional
FailedEphemeralRunners int `json:"failedEphemeralRunners"`
}
func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {

View File

@@ -31,13 +31,27 @@ type EphemeralRunnerSetSpec struct {
// EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet
type EphemeralRunnerSetStatus struct {
// CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
CurrentReplicas int `json:"currentReplicas,omitempty"`
CurrentReplicas int `json:"currentReplicas"`
// EphemeralRunner counts separated by the stage ephemeral runners are in
// +optional
PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
// +optional
RunningEphemeralRunners int `json:"runningEphemeralRunners"`
// +optional
FailedEphemeralRunners int `json:"failedEphemeralRunners"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer"
// +kubebuilder:printcolumn:JSONPath=".status.currentReplicas", name="CurrentReplicas",type="integer"
//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
// EphemeralRunnerSet is the Schema for the ephemeralrunnersets API
type EphemeralRunnerSet struct {
metav1.TypeMeta `json:",inline"`

View File

@@ -0,0 +1,9 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set
skip-clean-up: true

View File

@@ -5,5 +5,3 @@ chart-repos:
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/actions-runner-controller
- charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set

View File

@@ -0,0 +1,5 @@
# Set the following to dummy values.
# This is only useful in CI
image:
repository: test-arc
tag: dev

View File

@@ -17,16 +17,28 @@ spec:
- additionalPrinterColumns:
- jsonPath: .spec.minRunners
name: Minimum Runners
type: number
type: integer
- jsonPath: .spec.maxRunners
name: Maximum Runners
type: number
type: integer
- jsonPath: .status.currentRunners
name: Current Runners
type: number
type: integer
- jsonPath: .status.state
name: State
type: string
- jsonPath: .status.pendingEphemeralRunners
name: Pending Runners
type: integer
- jsonPath: .status.runningEphemeralRunners
name: Running Runners
type: integer
- jsonPath: .status.finishedEphemeralRunners
name: Finished Runners
type: integer
- jsonPath: .status.deletingEphemeralRunners
name: Deleting Runners
type: integer
name: v1alpha1
schema:
openAPIV3Schema:
@@ -4306,6 +4318,12 @@ spec:
properties:
currentRunners:
type: integer
failedEphemeralRunners:
type: integer
pendingEphemeralRunners:
type: integer
runningEphemeralRunners:
type: integer
state:
type: string
type: object

View File

@@ -21,6 +21,18 @@ spec:
- jsonPath: .status.currentReplicas
name: CurrentReplicas
type: integer
- jsonPath: .status.pendingEphemeralRunners
name: Pending Runners
type: integer
- jsonPath: .status.runningEphemeralRunners
name: Running Runners
type: integer
- jsonPath: .status.finishedEphemeralRunners
name: Finished Runners
type: integer
- jsonPath: .status.deletingEphemeralRunners
name: Deleting Runners
type: integer
name: v1alpha1
schema:
openAPIV3Schema:
@@ -4296,6 +4308,14 @@ spec:
currentReplicas:
description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
type: integer
failedEphemeralRunners:
type: integer
pendingEphemeralRunners:
type: integer
runningEphemeralRunners:
type: integer
required:
- currentReplicas
type: object
type: object
served: true

View File

@@ -54,10 +54,8 @@ spec:
command:
- "/manager"
env:
- name: CONTROLLER_MANAGER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: CONTROLLER_MANAGER_CONTAINER_IMAGE
value: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
- name: CONTROLLER_MANAGER_POD_NAMESPACE
valueFrom:
fieldRef:
@@ -98,4 +96,4 @@ spec:
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -261,9 +261,11 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
@@ -274,8 +276,8 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
@@ -375,9 +377,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
@@ -389,8 +393,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)

View File

@@ -3,4 +3,4 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
github_token: test

View File

@@ -17,16 +17,28 @@ spec:
- additionalPrinterColumns:
- jsonPath: .spec.minRunners
name: Minimum Runners
type: number
type: integer
- jsonPath: .spec.maxRunners
name: Maximum Runners
type: number
type: integer
- jsonPath: .status.currentRunners
name: Current Runners
type: number
type: integer
- jsonPath: .status.state
name: State
type: string
- jsonPath: .status.pendingEphemeralRunners
name: Pending Runners
type: integer
- jsonPath: .status.runningEphemeralRunners
name: Running Runners
type: integer
- jsonPath: .status.finishedEphemeralRunners
name: Finished Runners
type: integer
- jsonPath: .status.deletingEphemeralRunners
name: Deleting Runners
type: integer
name: v1alpha1
schema:
openAPIV3Schema:
@@ -4306,6 +4318,12 @@ spec:
properties:
currentRunners:
type: integer
failedEphemeralRunners:
type: integer
pendingEphemeralRunners:
type: integer
runningEphemeralRunners:
type: integer
state:
type: string
type: object

View File

@@ -21,6 +21,18 @@ spec:
- jsonPath: .status.currentReplicas
name: CurrentReplicas
type: integer
- jsonPath: .status.pendingEphemeralRunners
name: Pending Runners
type: integer
- jsonPath: .status.runningEphemeralRunners
name: Running Runners
type: integer
- jsonPath: .status.finishedEphemeralRunners
name: Finished Runners
type: integer
- jsonPath: .status.deletingEphemeralRunners
name: Deleting Runners
type: integer
name: v1alpha1
schema:
openAPIV3Schema:
@@ -4296,6 +4308,14 @@ spec:
currentReplicas:
description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
type: integer
failedEphemeralRunners:
type: integer
pendingEphemeralRunners:
type: integer
runningEphemeralRunners:
type: integer
required:
- currentReplicas
type: object
type: object
served: true

View File

@@ -0,0 +1,10 @@
source:
kind: Deployment
name: controller-manager
fieldPath: spec.template.spec.containers.[name=manager].image
targets:
- select:
kind: Deployment
name: controller-manager
fieldPaths:
- spec.template.spec.containers.[name=manager].env.[name=CONTROLLER_MANAGER_CONTAINER_IMAGE].value

View File

@@ -6,3 +6,6 @@ images:
- name: controller
newName: summerwind/actions-runner-controller
newTag: dev
replacements:
- path: env-replacement.yaml

View File

@@ -50,10 +50,8 @@ spec:
optional: true
- name: GITHUB_APP_PRIVATE_KEY
value: /etc/actions-runner-controller/github_app_private_key
- name: CONTROLLER_MANAGER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: CONTROLLER_MANAGER_CONTAINER_IMAGE
value: CONTROLLER_MANAGER_CONTAINER_IMAGE
- name: CONTROLLER_MANAGER_POD_NAMESPACE
valueFrom:
fieldRef:

View File

@@ -238,6 +238,9 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners {
if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas
obj.Status.PendingEphemeralRunners = latestRunnerSet.Status.PendingEphemeralRunners
obj.Status.RunningEphemeralRunners = latestRunnerSet.Status.RunningEphemeralRunners
obj.Status.FailedEphemeralRunners = latestRunnerSet.Status.FailedEphemeralRunners
}); err != nil {
log.Error(err, "Failed to update autoscaling runner set status with current runner count")
return ctrl.Result{}, err

View File

@@ -157,23 +157,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
Expect(len(runnerSetList.Items)).To(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created")
runnerSet := runnerSetList.Items[0]
statusUpdate := runnerSet.DeepCopy()
statusUpdate.Status.CurrentReplicas = 100
err = k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch EphemeralRunnerSet status")
Eventually(
func() (int, error) {
updated := new(v1alpha1.AutoscalingRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated)
if err != nil {
return 0, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err)
}
return updated.Status.CurrentRunners, nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(100), "AutoScalingRunnerSet status should be updated")
})
})
@@ -398,9 +381,75 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation")
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation")
})
})
It("Should update Status on EphemeralRunnerSet status Update", func() {
ars := new(v1alpha1.AutoscalingRunnerSet)
Eventually(
func() (bool, error) {
err := k8sClient.Get(
ctx,
client.ObjectKey{
Name: autoscalingRunnerSet.Name,
Namespace: autoscalingRunnerSet.Namespace,
},
ars,
)
if err != nil {
return false, err
}
return true, nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeTrue(), "AutoscalingRunnerSet should be created")
runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
Eventually(func() (int, error) {
err := k8sClient.List(ctx, runnerSetList, client.InNamespace(ars.Namespace))
if err != nil {
return 0, err
}
return len(runnerSetList.Items), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(1), "Failed to fetch runner set list")
runnerSet := runnerSetList.Items[0]
statusUpdate := runnerSet.DeepCopy()
statusUpdate.Status.CurrentReplicas = 6
statusUpdate.Status.FailedEphemeralRunners = 1
statusUpdate.Status.RunningEphemeralRunners = 2
statusUpdate.Status.PendingEphemeralRunners = 3
desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{
CurrentRunners: statusUpdate.Status.CurrentReplicas,
State: "",
PendingEphemeralRunners: statusUpdate.Status.PendingEphemeralRunners,
RunningEphemeralRunners: statusUpdate.Status.RunningEphemeralRunners,
FailedEphemeralRunners: statusUpdate.Status.FailedEphemeralRunners,
}
err := k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet))
Expect(err).NotTo(HaveOccurred(), "Failed to patch runner set status")
Eventually(
func() (v1alpha1.AutoscalingRunnerSetStatus, error) {
updated := new(v1alpha1.AutoscalingRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated)
if err != nil {
return v1alpha1.AutoscalingRunnerSetStatus{}, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err)
}
return updated.Status, nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(desiredStatus), "AutoScalingRunnerSet status should be updated")
})
})
var _ = Describe("Test AutoScalingController updates", func() {

View File

@@ -200,11 +200,18 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}
}
desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
CurrentReplicas: total,
PendingEphemeralRunners: len(pendingEphemeralRunners),
RunningEphemeralRunners: len(runningEphemeralRunners),
FailedEphemeralRunners: len(failedEphemeralRunners),
}
// Update the status if needed.
if ephemeralRunnerSet.Status.CurrentReplicas != total {
if ephemeralRunnerSet.Status != desiredStatus {
log.Info("Updating status with current runners count", "count", total)
if err := patchSubResource(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
obj.Status.CurrentReplicas = total
obj.Status = desiredStatus
}); err != nil {
log.Error(err, "Failed to update status with current runners count")
return ctrl.Result{}, err

View File

@@ -559,6 +559,181 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created")
})
It("Should update status on Ephemeral Runner state changes", func() {
created := new(actionsv1alpha1.EphemeralRunnerSet)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(Succeed(), "EphemeralRunnerSet should be created")
// Scale up the EphemeralRunnerSet
updated := created.DeepCopy()
updated.Spec.Replicas = 3
err := k8sClient.Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet replica count")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
func() (bool, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return false, err
}
if len(runnerList.Items) != 3 {
return false, err
}
var pendingOriginal *v1alpha1.EphemeralRunner
var runningOriginal *v1alpha1.EphemeralRunner
var failedOriginal *v1alpha1.EphemeralRunner
var empty []*v1alpha1.EphemeralRunner
for _, runner := range runnerList.Items {
switch runner.Status.RunnerId {
case 101:
pendingOriginal = runner.DeepCopy()
case 102:
runningOriginal = runner.DeepCopy()
case 103:
failedOriginal = runner.DeepCopy()
default:
empty = append(empty, runner.DeepCopy())
}
}
refetch := false
if pendingOriginal == nil { // if NO pending
refetch = true
pendingOriginal = empty[0]
empty = empty[1:]
pending := pendingOriginal.DeepCopy()
pending.Status.RunnerId = 101
pending.Status.Phase = corev1.PodPending
err = k8sClient.Status().Patch(ctx, pending, client.MergeFrom(pendingOriginal))
if err != nil {
return false, err
}
}
if runningOriginal == nil { // if NO running
refetch = true
runningOriginal = empty[0]
empty = empty[1:]
running := runningOriginal.DeepCopy()
running.Status.RunnerId = 102
running.Status.Phase = corev1.PodRunning
err = k8sClient.Status().Patch(ctx, running, client.MergeFrom(runningOriginal))
if err != nil {
return false, err
}
}
if failedOriginal == nil { // if NO failed
refetch = true
failedOriginal = empty[0]
failed := pendingOriginal.DeepCopy()
failed.Status.RunnerId = 103
failed.Status.Phase = corev1.PodFailed
err = k8sClient.Status().Patch(ctx, failed, client.MergeFrom(failedOriginal))
if err != nil {
return false, err
}
}
return !refetch, nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeTrue(), "Failed to eventually update to one pending, one running and one failed")
desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
CurrentReplicas: 3,
PendingEphemeralRunners: 1,
RunningEphemeralRunners: 1,
FailedEphemeralRunners: 1,
}
Eventually(
func() (v1alpha1.EphemeralRunnerSetStatus, error) {
updated := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
if err != nil {
return v1alpha1.EphemeralRunnerSetStatus{}, err
}
return updated.Status, nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one")
updated = new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
Expect(err).NotTo(HaveOccurred(), "Failed to fetch ephemeral runner set")
updatedOriginal := updated.DeepCopy()
updated.Spec.Replicas = 0
err = k8sClient.Patch(ctx, updated, client.MergeFrom(updatedOriginal))
Expect(err).NotTo(HaveOccurred(), "Failed to patch ephemeral runner set with 0 replicas")
Eventually(
func() (int, error) {
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(1), "Failed to eventually scale down")
desiredStatus = v1alpha1.EphemeralRunnerSetStatus{
CurrentReplicas: 1,
PendingEphemeralRunners: 0,
RunningEphemeralRunners: 0,
FailedEphemeralRunners: 1,
}
Eventually(
func() (v1alpha1.EphemeralRunnerSetStatus, error) {
updated := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
if err != nil {
return v1alpha1.EphemeralRunnerSetStatus{}, err
}
return updated.Status, nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one")
err = k8sClient.Delete(ctx, &runnerList.Items[0])
Expect(err).To(BeNil(), "Failed to delete failed ephemeral runner")
desiredStatus = v1alpha1.EphemeralRunnerSetStatus{} // empty
Eventually(
func() (v1alpha1.EphemeralRunnerSetStatus, error) {
updated := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated)
if err != nil {
return v1alpha1.EphemeralRunnerSetStatus{}, err
}
return updated.Status, nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one")
})
})
})
@@ -821,12 +996,13 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet)
runnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, runnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updatedRunnerSet := runnerSet.DeepCopy()
updatedRunnerSet.Spec.Replicas = 0
err = k8sClient.Update(ctx, updatedRunnerSet)
err = k8sClient.Patch(ctx, updatedRunnerSet, client.MergeFrom(runnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
Eventually(

View File

@@ -124,6 +124,31 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
arc-runners arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s
```
### Upgrade to newer versions
Upgrading actions-runner-controller requires a few extra steps because CRDs will not be automatically upgraded (this is a Helm limitation).
1. Uninstall the autoscaling runner set first
```bash
INSTALLATION_NAME="arc-runner-set"
NAMESPACE="arc-runners"
helm uninstall "${INSTALLATION_NAME}" --namespace "${NAMESPACE}"
```
1. Wait for all the pods to drain
1. Pull the new helm chart, unpack it and update the CRDs. When applying this step, don't forget to replace `<PATH>` with the path of the `gha-runner-scale-set-controller` helm chart:
```bash
helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
--version 0.3.0 \
--untar && \
kubectl replace -f <PATH>/gha-runner-scale-set-controller/crds/
```
1. Reinstall actions-runner-controller using the steps from the previous section
## Troubleshooting
### Check the logs

173
main.go
View File

@@ -17,7 +17,6 @@ limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"os"
@@ -33,9 +32,7 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/kelseyhightower/envconfig"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
@@ -47,9 +44,7 @@ const (
defaultDockerImage = "docker:dind"
)
var (
scheme = runtime.NewScheme()
)
var scheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(scheme)
@@ -68,6 +63,7 @@ func (i *stringSlice) Set(value string) error {
*i = append(*i, value)
return nil
}
func main() {
var (
err error
@@ -170,17 +166,69 @@ func main() {
os.Exit(1)
}
multiClient := actionssummerwindnet.NewMultiGitHubClient(
mgr.GetClient(),
ghClient,
)
if autoScalingRunnerSetOnly {
managerImage := os.Getenv("CONTROLLER_MANAGER_CONTAINER_IMAGE")
if managerImage == "" {
log.Error(err, "unable to obtain listener image")
os.Exit(1)
}
managerNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE")
if managerNamespace == "" {
log.Error(err, "unable to obtain manager pod namespace")
os.Exit(1)
}
actionsMultiClient := actions.NewMultiClient(
"actions-runner-controller/"+build.Version,
log.WithName("actions-clients"),
)
actionsMultiClient := actions.NewMultiClient(
"actions-runner-controller/"+build.Version,
log.WithName("actions-clients"),
)
if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingRunnerSet"),
Scheme: mgr.GetScheme(),
ControllerNamespace: managerNamespace,
DefaultRunnerScaleSetListenerImage: managerImage,
ActionsClient: actionsMultiClient,
DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunner"),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunner")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunnerSet"),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet")
os.Exit(1)
}
if err = (&actionsgithubcom.AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingListener"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingListener")
os.Exit(1)
}
} else {
multiClient := actionssummerwindnet.NewMultiGitHubClient(
mgr.GetClient(),
ghClient,
)
if !autoScalingRunnerSetOnly {
runnerReconciler := &actionssummerwindnet.RunnerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("runner"),
@@ -314,94 +362,15 @@ func main() {
log.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet")
os.Exit(1)
}
}
}
// We use this environment variable to turn on the ScaleSet related controllers.
// Otherwise ARC's legacy chart is unable to deploy a working ARC controller-manager pod,
// because the chart does not contain the new actions.* CRDs while ARC requires those CRDs.
//
// We might have used a more explicitly named environment variable for this,
// e.g. "CONTROLLER_MANAGER_ENABLE_SCALE_SET" to explicitly enable the new controllers,
// or "CONTROLLER_MANAGER_DISABLE_SCALE_SET" to explicitly disable the new controllers.
// However, doing so would affect either private ARC testers or current ARC users
// who run ARC without those variables.
mgrPodName := os.Getenv("CONTROLLER_MANAGER_POD_NAME")
if mgrPodName != "" {
mgrPodNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE")
var mgrPod corev1.Pod
err = mgr.GetAPIReader().Get(context.Background(), types.NamespacedName{Namespace: mgrPodNamespace, Name: mgrPodName}, &mgrPod)
if err != nil {
log.Error(err, fmt.Sprintf("unable to obtain manager pod: %s (%s)", mgrPodName, mgrPodNamespace))
os.Exit(1)
}
var mgrContainer *corev1.Container
for _, container := range mgrPod.Spec.Containers {
if container.Name == "manager" {
mgrContainer = &container
break
injector := &actionssummerwindnet.PodRunnerTokenInjector{
Client: mgr.GetClient(),
GitHubClient: multiClient,
Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"),
}
if err = injector.SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create webhook server", "webhook", "PodRunnerTokenInjector")
os.Exit(1)
}
}
if mgrContainer != nil {
log.Info("Detected manager container", "image", mgrContainer.Image)
} else {
log.Error(err, "unable to obtain manager container image")
os.Exit(1)
}
if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingRunnerSet"),
Scheme: mgr.GetScheme(),
ControllerNamespace: mgrPodNamespace,
DefaultRunnerScaleSetListenerImage: mgrContainer.Image,
ActionsClient: actionsMultiClient,
DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunner"),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunner")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunnerSet"),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet")
os.Exit(1)
}
if err = (&actionsgithubcom.AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingListener"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingListener")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
}
if !disableAdmissionWebhook && !autoScalingRunnerSetOnly {
injector := &actionssummerwindnet.PodRunnerTokenInjector{
Client: mgr.GetClient(),
GitHubClient: multiClient,
Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"),
}
if err = injector.SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create webhook server", "webhook", "PodRunnerTokenInjector")
os.Exit(1)
}
}

View File

@@ -79,14 +79,34 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
labels["repository_full_name"] = *n
keysAndValues = append(keysAndValues, "repository_full_name", *n)
}
if e.Repo.Owner != nil {
if l := e.Repo.Owner.Login; l != nil {
labels["owner"] = *l
keysAndValues = append(keysAndValues, "owner", *l)
}
}
}
var org string
if e.Org != nil {
if n := e.Org.Name; n != nil {
labels["organization"] = *e.Org.Name
org = *n
keysAndValues = append(keysAndValues, "organization", *n)
}
}
labels["organization"] = org
var wn string
if e.WorkflowJob != nil {
if n := e.WorkflowJob.WorkflowName; n != nil {
wn = *n
keysAndValues = append(keysAndValues, "workflow_name", *n)
}
}
labels["workflow_name"] = wn
log := reader.Log.WithValues(keysAndValues...)
// switch on job status
switch action := e.GetAction(); action {
@@ -102,14 +122,10 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
if err != nil {
reader.Log.Error(err, "reading workflow job log")
log.Error(err, "reading workflow job log")
return
} else {
reader.Log.WithValues("job_name", *e.WorkflowJob.Name, "job_id", fmt.Sprint(*e.WorkflowJob.ID), "repository", *e.Repo.Name, "repository_full_name", *e.Repo.FullName)
if len(*e.Org.Name) > 0 {
reader.Log.WithValues("organization", *e.Org.Name)
}
reader.Log.Info("reading workflow_job logs")
log.Info("reading workflow_job logs")
}
githubWorkflowJobQueueDurationSeconds.With(labels).Observe(parseResult.QueueTime.Seconds())
@@ -122,10 +138,10 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
if err != nil {
reader.Log.Error(err, "reading workflow job log")
log.Error(err, "reading workflow job log")
return
} else {
reader.Log.Info("reading workflow_job logs", keysAndValues...)
log.Info("reading workflow_job logs", keysAndValues...)
}
if *e.WorkflowJob.Conclusion == "failure" {

View File

@@ -71,14 +71,19 @@ var (
}
)
func metricLabels(extras ...string) []string {
return append(append([]string{}, commonLabels...), extras...)
}
var (
commonLabels = []string{"runs_on", "job_name", "organization", "repository", "repository_full_name", "owner", "workflow_name"}
githubWorkflowJobQueueDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "github_workflow_job_queue_duration_seconds",
Help: "Queue times for workflow jobs in seconds",
Buckets: runtimeBuckets,
},
[]string{"runs_on", "job_name"},
metricLabels(),
)
githubWorkflowJobRunDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
@@ -86,41 +91,41 @@ var (
Help: "Run times for workflow jobs in seconds",
Buckets: runtimeBuckets,
},
[]string{"runs_on", "job_name", "job_conclusion"},
metricLabels("job_conclusion"),
)
githubWorkflowJobConclusionsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "github_workflow_job_conclusions_total",
Help: "Conclusions for tracked workflow jobs",
},
[]string{"runs_on", "job_name", "job_conclusion"},
metricLabels("job_conclusion"),
)
githubWorkflowJobsQueuedTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "github_workflow_jobs_queued_total",
Help: "Total count of workflow jobs queued (events where job_status=queued)",
},
[]string{"runs_on", "job_name"},
metricLabels(),
)
githubWorkflowJobsStartedTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "github_workflow_jobs_started_total",
Help: "Total count of workflow jobs started (events where job_status=in_progress)",
},
[]string{"runs_on", "job_name"},
metricLabels(),
)
githubWorkflowJobsCompletedTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "github_workflow_jobs_completed_total",
Help: "Total count of workflow jobs completed (events where job_status=completed)",
},
[]string{"runs_on", "job_name"},
metricLabels(),
)
githubWorkflowJobFailuresTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "github_workflow_job_failures_total",
Help: "Conclusions for tracked workflow runs",
},
[]string{"runs_on", "job_name", "failed_step", "exit_code"},
metricLabels("failed_step", "exit_code"),
)
)

View File

@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/go-logr/logr"
gogithub "github.com/google/go-github/v47/github"
gogithub "github.com/google/go-github/v50/github"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/actions/actions-runner-controller/github"

View File

@@ -8,7 +8,7 @@ TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.302.1
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0
DOCKER_VERSION ?= 20.10.21
DOCKER_VERSION ?= 20.10.23
# default list of platforms for which multiarch image is built
ifeq (${PLATFORMS}, )

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.18
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.16.0
ARG DUMB_INIT_VERSION=1.2.5

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.21
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.16.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.18
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.16.0
ARG DUMB_INIT_VERSION=1.2.5

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.21
ARG DOCKER_VERSION=20.10.23
ARG DOCKER_COMPOSE_VERSION=v2.16.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001