Compare commits

...

77 Commits

Author SHA1 Message Date
Nikola Jokic
3be7128f9a Prepare 0.9.2 release (#3530) 2024-05-20 10:58:06 +02:00
Nikola Jokic
3bda9bb240 Refresh session if token expires during delete message (#3529) 2024-05-17 15:16:38 +02:00
Nikola Jokic
ab92e4edc3 Re-use the last desired patch on empty batch (#3453) 2024-05-17 15:12:16 +02:00
Nikola Jokic
fa7a4f584e Extract single place to set up indexers (#3454) 2024-05-17 14:42:46 +02:00
Nikola Jokic
9b51f25800 Rename imports in tests to remove double import and to improve readability (#3455) 2024-05-17 14:37:13 +02:00
Nikola Jokic
ea13873f14 Remove service monitor that is not used in controller chart (#3526) 2024-05-17 13:06:57 +02:00
github-actions[bot]
a6d87c46cd Updates: runner to v2.316.1 (#3496)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-05-14 11:24:14 +02:00
Nikola Jokic
51c70a64c3 Include controller version in logs (#3473) 2024-05-13 14:16:36 +02:00
dependabot[bot]
a1b8e0cc3d Bump golang.org/x/sync from 0.6.0 to 0.7.0 (#3482)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 08:53:19 +02:00
dependabot[bot]
2889029bc5 Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#3462)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-29 12:56:43 +02:00
dependabot[bot]
87f2e00971 Bump go.uber.org/zap from 1.26.0 to 1.27.0 (#3442)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-29 12:56:05 +02:00
dependabot[bot]
d9af241a7d Bump golang.org/x/oauth2 from 0.15.0 to 0.19.0 (#3441)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2024-04-29 12:55:24 +02:00
github-actions[bot]
49490c4421 Updates: runner to v2.316.0 (#3463)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-04-24 12:21:30 +01:00
Bryan Peterson
109750f816 propogate arbitrary labels from runnersets to all created resources (#3157) 2024-04-23 11:19:32 +02:00
Nikola Jokic
9e191cdd21 Prepare 0.9.1 release (#3448) 2024-04-17 10:51:28 +02:00
Nikola Jokic
f965dfef73 Shutdown metrics server when listener exits (#3445) 2024-04-16 21:29:03 +02:00
Nikola Jokic
4ee49fee14 Propagate max capacity information to the actions back-end (#3431) 2024-04-16 14:00:40 +02:00
Nikola Jokic
8075e5ee74 Refactor actions client error to include request id (#3430)
Co-authored-by: Francesco Renzi <rentziass@gmail.com>
2024-04-16 12:57:44 +02:00
Nikola Jokic
963ae48a3f Include self correction on empty batch and avoid removing pending runners when cluster is busy (#3426) 2024-04-16 12:55:25 +02:00
nasa9084
98854ef9c0 Fix doc comment for listenerTemplate (#3436) 2024-04-15 11:48:30 +02:00
dependabot[bot]
1987d9eb2e Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 (#3418)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2024-04-12 15:01:52 +02:00
Alexandre Chouinard
0006dd5eb1 Add topologySpreadConstraint to gha-runner-scale-set-controller chart (#3405) 2024-04-12 14:22:41 +02:00
Nikola Jokic
86f1714354 Revert "Bump k8s.io/client-go from 0.28.4 to 0.29.3 (#3416)" (#3432) 2024-04-12 13:51:44 +02:00
dependabot[bot]
f68bbad579 Bump k8s.io/client-go from 0.28.4 to 0.29.3 (#3416)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2024-04-12 13:12:02 +02:00
dependabot[bot]
d3a8a34bb2 Bump golang.org/x/net from 0.20.0 to 0.24.0 (#3417)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-09 07:42:41 +02:00
dependabot[bot]
d515b4a6e0 Bump github.com/onsi/ginkgo/v2 from 2.13.1 to 2.17.1 (#3379)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-08 10:17:02 +02:00
dependabot[bot]
d971fedbe8 Bump github.com/evanphx/json-patch from 5.7.0+incompatible to 5.9.0+incompatible (#3398)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-08 10:11:10 +02:00
dependabot[bot]
6c6d061f0a Bump github.com/cloudflare/circl from 1.3.6 to 1.3.7 (#3206)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-04 14:57:24 -04:00
github-actions[bot]
5b9b9f7ca2 Updates: runner to v2.315.0 container-hooks to v0.6.0 (#3387)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-04-03 16:06:30 -04:00
Nikola Jokic
4357525445 Prepare 0.9.0 release (#3388) 2024-03-27 11:54:17 +01:00
Nikola Jokic
1d1790614b Add retry on 401 and 403 for runner-registration (#3377)
Co-authored-by: Francesco Renzi <rentziass@gmail.com>
2024-03-27 10:55:17 +01:00
dependabot[bot]
442d52cd56 Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#3383)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2024-03-26 15:19:39 +01:00
Nikola Jokic
b6a95ae879 Change duplicate message key in logs while updating ephemeral runner status (#3380) 2024-03-26 12:57:46 +01:00
dependabot[bot]
9968141086 Bump golang.org/x/sync from 0.5.0 to 0.6.0 (#3384)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-26 09:27:58 +01:00
dependabot[bot]
e59d127d41 Bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#3173)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2024-03-25 16:28:31 +01:00
dependabot[bot]
fb1232c13e Bump google.golang.org/protobuf from 1.31.0 to 1.33.0 (#3349)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2024-03-22 18:04:34 +01:00
Nikola Jokic
7a643a5107 Fix overscaling when the controller is much faster then the listener (#3371)
Co-authored-by: Francesco Renzi <rentziass@gmail.com>
2024-03-20 15:36:12 +01:00
Nikola Jokic
46cfbb6ec7 Fix documented dind expansion (#3368) 2024-03-19 15:24:58 +01:00
Nikola Jokic
c9099a5a56 Add annotation with values hash to re-create listener (#3195) 2024-03-19 14:29:49 +01:00
Hidehito Yabuuchi
48706584fd Propagate runner scale set name annotation to EphemeralRunner (#3098) 2024-03-19 12:50:49 +01:00
Nikola Jokic
2c0e53951b Fix tests and comment string for docker socket mounted path (#3366) 2024-03-19 11:29:07 +01:00
Nikola Jokic
a7af44e042 Deprecation warning of older listener for 0.9.0 release (#3280) 2024-03-18 12:59:41 +01:00
Nikola Jokic
f225fef921 Bump Go version to 1.22.1 (#3290) 2024-03-18 12:46:30 +01:00
Nikola Jokic
814947c60e Update metrics to include repository on job-based label (#3310)
Co-authored-by: Samuel Rats <samuel.rats@teads.com>
2024-03-18 12:45:52 +01:00
Nikola Jokic
039350a0d0 Escape automated updates version to avoid changing stuff that don't exactly match (#3354) 2024-03-18 12:41:12 +01:00
Nikola Jokic
a0fb417f69 Change docker socket path to /var/run/docker.sock (#3337) 2024-03-18 12:40:27 +01:00
Nikola Jokic
f5fd831c2f Add Francesco (@rentziass) to CODEOWNERS (#3362) 2024-03-18 12:08:16 +01:00
github-actions[bot]
753afb75b9 Updates: runner to v2.314.1 (#3308)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2024-02-28 15:43:14 -05:00
Nikola Jokic
309b53143e Prepare 0.8.3 release (#3309) 2024-02-28 10:26:32 +01:00
Nikola Jokic
7da2d7f96a Fix acquire jobs after session refresh ghalistener (#3307) 2024-02-27 17:37:42 +01:00
Ivar Larsson
e06c7edc21 Refer to the correct variable in discovery error message (#3296) 2024-02-26 15:51:07 +01:00
Talia Stocks
9fba37540a Expose volumeMounts and volumes in gha-runner-scale-set-controller (#3260) 2024-02-12 14:47:09 +01:00
github-actions[bot]
a68aa00bd8 Updates: runner to v2.313.0 container-hooks to v0.5.1 (#3270)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-02-09 09:44:28 -05:00
dependabot[bot]
9b053102ed Bump github.com/google/uuid from 1.4.0 to 1.6.0 (#3253)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-06 15:05:09 +01:00
Nick McClorey
c03fac8fdd Remove Typo in Grafana docs (#3235) 2024-02-02 10:01:22 +01:00
Nikola Jokic
d72774753c Prepare 0.8.2 release (#3249) 2024-01-26 11:03:08 +01:00
Nikola Jokic
f7b6ad901d Add listener graceful termination period and background context after the message is received (#3187) 2024-01-25 15:45:07 +01:00
Nikola Jokic
728f05c844 Delete message session when listener.Listen returns (#3240) 2024-01-25 15:12:19 +01:00
Nikola Jokic
c00465973e Publish metrics in the new ghalistener (#3193) 2024-01-25 14:46:42 +01:00
github-actions[bot]
5f23afaad3 Updates: runner to v2.312.0 (#3229)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-01-22 14:17:31 -05:00
Ken Muse
47dfed3ced Add documentation legacy modes warning and links to new docs (#3199) 2024-01-05 19:56:31 +01:00
Nikola Jokic
1f9b7541e6 Prepare 0.8.1 release (#3184) 2023-12-21 17:14:33 +01:00
Nikola Jokic
a029b705cd Fix proxy issue in new listener client (#3181) 2023-12-21 15:35:36 +01:00
Nikola Jokic
3fab744a4f Prepare 0.8.0 release (#3175) 2023-12-20 11:16:56 +01:00
Nikola Jokic
fe8c3bb789 Change listener container name (#3167) 2023-12-19 12:22:52 +01:00
Nikola Jokic
e40874f67f Fix assertion test in wait for delete (#3146) 2023-12-18 17:04:35 +01:00
Serge
d7d479172d Fix override listener pod spec (#3139) (#3161)
Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
2023-12-18 16:50:06 +01:00
Nikola Jokic
31352924d7 Fix empty env and volumeMounts object on default setup (#3166) 2023-12-18 16:01:34 +01:00
dependabot[bot]
3e4201ac5f Bump k8s.io/client-go from 0.28.3 to 0.28.4 (#3125)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-12-14 12:58:35 +01:00
dependabot[bot]
a44b037d6b Bump golang.org/x/oauth2 from 0.14.0 to 0.15.0 (#3127)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-14 12:57:42 +01:00
dependabot[bot]
e11beea49b Bump golang.org/x/net from 0.18.0 to 0.19.0 (#3126)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-12-14 09:45:22 +01:00
dependabot[bot]
bfadad0830 Bump github.com/gruntwork-io/terratest from 0.41.24 to 0.46.7 (#3091)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-12-14 09:35:07 +01:00
Nikola Jokic
f7eb88ce9c Change minRunners behavior and fix the new listener min runners (#3139) 2023-12-13 19:39:21 +01:00
Nikola Jokic
0fd8eac305 Update user agent for new ghalistener (#3138) 2023-12-08 14:01:22 +01:00
Nikola Jokic
b78cadd901 Refactoring listener app with configurable fallback (#3096) 2023-12-08 13:41:06 +01:00
Nikola Jokic
202a97ab12 Modify user agent format with subsystem and is proxy configured information (#3116) 2023-12-08 13:16:29 +01:00
Toru Komatsu
b08d533105 Record the error when the creation pod fails (#3112)
Signed-off-by: utam0k <k0ma@utam0k.jp>
2023-12-07 21:11:52 +01:00
118 changed files with 67469 additions and 13377 deletions

View File

@@ -193,7 +193,7 @@ runs:
     shell: bash
     run: |
       helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
-      kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
+      kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
   - name: Gather logs and cleanup
     shell: bash

View File

@@ -78,7 +78,7 @@ jobs:
       run: |
         RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
         CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
         PR_NAME="Updates:"
         if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
         then
@@ -88,7 +88,7 @@
         then
           PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
         fi
         result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
         if [ -z "$result" ]
         then
@@ -120,21 +120,25 @@
     steps:
       - uses: actions/checkout@v3
       - name: New branch
        run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
       - name: Update files
         run: |
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go
+          CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"
+          LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
+          CURRENT_VERSION="${CONTAINER_HOOKS_CURRENT_VERSION//./\\.}"
+          LATEST_VERSION="${CONTAINER_HOOKS_LATEST_VERSION//./\\.}"
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
       - name: Commit changes
         run: |
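The escaping added above exists because sed treats an unescaped `.` as "any single character", so substituting the raw version string can also rewrite near-miss strings. A minimal Go sketch of the same pitfall, shown with the standard regexp package purely as an illustration (the workflow itself relies on bash parameter expansion and sed):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Without escaping, each "." matches any character, so the pattern
    // also matches strings that are not the runner version at all.
    loose := regexp.MustCompile("2.311.0")
    fmt.Println(loose.MatchString("2x311y0")) // true: unintended match

    // Escaping the dots (regexp.QuoteMeta here, "${VAR//./\\.}" in the workflow)
    // restricts the substitution to the exact version string.
    strict := regexp.MustCompile(regexp.QuoteMeta("2.311.0"))
    fmt.Println(strict.MatchString("2x311y0")) // false
    fmt.Println(strict.MatchString("2.311.0")) // true
}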

View File

@@ -16,7 +16,7 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_REPO: arc_e2e_test_dummy
   IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "0.7.0"
+  IMAGE_VERSION: "0.9.2"

 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests
@@ -880,3 +880,98 @@
         helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
         kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
         kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
init-with-min-runners:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env:
WORKFLOW_FILE: arc-test-workflow.yaml
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- uses: ./.github/actions/setup-arc-e2e
id: setup
with:
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
image-name: ${{env.IMAGE_NAME}}
image-tag: ${{env.IMAGE_VERSION}}
target-org: ${{env.TARGET_ORG}}
- name: Install gha-runner-scale-set-controller
id: install_arc_controller
run: |
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository=${{ env.IMAGE_NAME }} \
--set image.tag=${{ env.IMAGE_VERSION }} \
--set flags.updateStrategy="eventual" \
./charts/gha-runner-scale-set-controller \
--debug
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
kubectl get pod -n arc-systems
kubectl describe deployment arc-gha-rs-controller -n arc-systems
- name: Install gha-runner-scale-set
id: install_arc
run: |
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
helm install "$ARC_NAME" \
--namespace "arc-runners" \
--create-namespace \
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
--set minRunners=5 \
./charts/gha-runner-scale-set \
--debug
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
count=0
while true; do
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1
fi
sleep 1
count=$((count+1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
- name: Ensure 5 runners are up
run: |
count=0
while true; do
pod_count=$(kubectl get pods -n arc-runners --no-headers | wc -l)
if [[ "$pod_count" = 5 ]]; then
echo "5 pods are up!"
break
fi
if [[ "$count" -ge 12 ]]; then
echo "Timeout waiting for 5 pods to be created"
exit 1
fi
sleep 1
count=$((count+1))
done

View File

@@ -1,7 +1,9 @@
 run:
   timeout: 3m
 output:
-  format: github-actions
+  formats:
+    - format: github-actions
+      path: stdout
 linters-settings:
   errcheck:
     exclude-functions:

View File

@@ -1,2 +1,2 @@
 # actions-runner-controller maintainers
-* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic
+* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass

View File

@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.21.3 as builder
+FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder

 WORKDIR /workspace
@@ -38,6 +38,7 @@ RUN --mount=target=. \
     export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
     go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/manager main.go && \
     go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
+    go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/ghalistener ./cmd/ghalistener && \
     go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
     go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
     go build -trimpath -ldflags="-s -w" -o /out/sleep ./cmd/sleep
@@ -52,6 +53,7 @@ COPY --from=builder /out/manager .
 COPY --from=builder /out/github-webhook-server .
 COPY --from=builder /out/actions-metrics-server .
 COPY --from=builder /out/github-runnerscaleset-listener .
+COPY --from=builder /out/ghalistener .
 COPY --from=builder /out/sleep .

 USER 65532:65532

View File

@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.311.0
+RUNNER_VERSION ?= 2.316.1
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -68,7 +68,7 @@ endif
 all: manager

 lint:
-	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run
+	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run

 GO_TEST_ARGS ?= -short
@@ -320,7 +320,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
 	CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
 	cd $$CONTROLLER_GEN_TMP_DIR ;\
 	go mod init tmp ;\
-	go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0 ;\
+	go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0 ;\
 	rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
 	}
 endif

View File

@@ -42,6 +42,10 @@ type EphemeralRunner struct {
     Status EphemeralRunnerStatus `json:"status,omitempty"`
 }

+func (er *EphemeralRunner) IsDone() bool {
+    return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
+}
+
 // EphemeralRunnerSpec defines the desired state of EphemeralRunner
 type EphemeralRunnerSpec struct {
     // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
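For context, a hypothetical sketch of how a caller might use the new IsDone helper; the cleanupFinished function is illustrative only and assumes the type lives in the repository's apis/actions.github.com/v1alpha1 package:

package example

import (
    v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

// cleanupFinished filters out runners whose pod phase is Succeeded or Failed,
// i.e. the ones IsDone reports as finished. Illustrative only; not taken from
// the controller's reconciler.
func cleanupFinished(runners []v1alpha1.EphemeralRunner) []v1alpha1.EphemeralRunner {
    remaining := make([]v1alpha1.EphemeralRunner, 0, len(runners))
    for _, r := range runners {
        if r.IsDone() {
            continue // finished runner; deletion would be handled elsewhere
        }
        remaining = append(remaining, r)
    }
    return remaining
}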

View File

@@ -24,6 +24,8 @@ import (
 type EphemeralRunnerSetSpec struct {
     // Replicas is the number of desired EphemeralRunner resources in the k8s namespace.
     Replicas int `json:"replicas,omitempty"`
+    // PatchID is the unique identifier for the patch issued by the listener app
+    PatchID int `json:"patchID"`

     EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"`
 }
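One plausible reading of why the PatchID field exists (this sketch is illustrative, not the reconciler's actual logic): the listener stamps each scaling patch with an identifier, so a patch that has already been applied, for example one re-sent on an empty batch, can be recognized and skipped.

package example

// patchTracker is a hypothetical helper showing how a unique patch identifier
// can be used to ignore a desired-state patch that was already processed.
type patchTracker struct {
    lastPatchID int
    replicas    int
}

// Apply returns true only when the patch carries a new ID; a re-sent patch
// with the same ID leaves the desired replica count untouched.
func (p *patchTracker) Apply(patchID, replicas int) bool {
    if patchID == p.lastPatchID {
        return false
    }
    p.lastPatchID = patchID
    p.replicas = replicas
    return true
}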

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.13.0
+    controller-gen.kubebuilder.io/version: v0.14.0
   name: horizontalrunnerautoscalers.actions.summerwind.dev
 spec:
   group: actions.summerwind.dev
@@ -35,10 +35,19 @@ spec:
       description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
       properties:
         apiVersion:
-          description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+          description: |-
+            APIVersion defines the versioned schema of this representation of an object.
+            Servers should convert recognized schemas to the latest internal value, and
+            may reject unrecognized values.
+            More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
           type: string
         kind:
-          description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+          description: |-
+            Kind is a string value representing the REST resource this object represents.
+            Servers may infer this from the endpoint the client submits requests to.
+            Cannot be updated.
+            In CamelCase.
+            More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
           type: string
         metadata:
           type: object
@@ -47,7 +56,9 @@
       properties:
         capacityReservations:
           items:
-            description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
+            description: |-
+              CapacityReservation specifies the number of replicas temporarily added
+              to the scale target until ExpirationTime.
             properties:
               effectiveTime:
                 format: date-time
@@ -79,30 +90,46 @@ spec:
           items:
             properties:
               repositoryNames:
-                description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`.
+                description: |-
+                  RepositoryNames is the list of repository names to be used for calculating the metric.
+                  For example, a repository name is the REPO part of `github.com/USER/REPO`.
                 items:
                   type: string
                 type: array
               scaleDownAdjustment:
-                description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
+                description: |-
+                  ScaleDownAdjustment is the number of runners removed on scale-down.
+                  You can only specify either ScaleDownFactor or ScaleDownAdjustment.
                 type: integer
               scaleDownFactor:
-                description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed.
+                description: |-
+                  ScaleDownFactor is the multiplicative factor applied to the current number of runners used
+                  to determine how many pods should be removed.
                 type: string
               scaleDownThreshold:
-                description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
+                description: |-
+                  ScaleDownThreshold is the percentage of busy runners less than which will
+                  trigger the hpa to scale the runners down.
                 type: string
               scaleUpAdjustment:
-                description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+                description: |-
+                  ScaleUpAdjustment is the number of runners added on scale-up.
+                  You can only specify either ScaleUpFactor or ScaleUpAdjustment.
                 type: integer
               scaleUpFactor:
-                description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added.
+                description: |-
+                  ScaleUpFactor is the multiplicative factor applied to the current number of runners used
+                  to determine how many pods should be added.
                 type: string
               scaleUpThreshold:
-                description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
+                description: |-
+                  ScaleUpThreshold is the percentage of busy runners greater than which will
+                  trigger the hpa to scale runners up.
                 type: string
               type:
-                description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
+                description: |-
+                  Type is the type of metric to be used for autoscaling.
+                  It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
                 type: string
             type: object
           type: array
@@ -110,7 +137,9 @@ spec:
           description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
           type: integer
         scaleDownDelaySecondsAfterScaleOut:
-          description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
+          description: |-
+            ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
+            Used to prevent flapping (down->up->down->... loop)
           type: integer
         scaleTargetRef:
           description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
@@ -126,7 +155,18 @@
             type: string
           type: object
         scaleUpTriggers:
-          description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
+          description: |-
+            ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
+            on each webhook requested received by the webhookBasedAutoscaler.
+            This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
+            Note that the added runners remain until the next sync period at least,
+            and they may or may not be used by GitHub Actions depending on the timing.
+            They are intended to be used to gain "resource slack" immediately after you
+            receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
           items:
             properties:
               amount:
@@ -139,12 +179,18 @@ spec:
                   description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
                   properties:
                     names:
-                      description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of patterns in the list can trigger autoscaling. Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
+                      description: |-
+                        Names is a list of GitHub Actions glob patterns.
+                        Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
+                        Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
+                        So it is very likely that you can utilize this to trigger depending on the job.
                       items:
                         type: string
                       type: array
                     repositories:
-                      description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
+                      description: |-
+                        Repositories is a list of GitHub repositories.
+                        Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
                       items:
                         type: string
                       type: array
@@ -169,7 +215,9 @@
                     type: array
                   type: object
                 push:
-                  description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+                  description: |-
+                    PushSpec is the condition for triggering scale-up on push event
+                    Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
                   type: object
                 workflowJob:
                   description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
@@ -178,23 +226,33 @@ spec:
             type: object
           type: array
         scheduledOverrides:
-          description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
+          description: |-
+            ScheduledOverrides is the list of ScheduledOverride.
+            It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+            The earlier a scheduled override is, the higher it is prioritized.
           items:
-            description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
+            description: |-
+              ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+              A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
             properties:
               endTime:
                 description: EndTime is the time at which the first override ends.
                 format: date-time
                 type: string
               minReplicas:
-                description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas.
+                description: |-
+                  MinReplicas is the number of runners while overriding.
+                  If omitted, it doesn't override minReplicas.
                 minimum: 0
                 nullable: true
                 type: integer
               recurrenceRule:
                 properties:
                   frequency:
-                    description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once.
+                    description: |-
+                      Frequency is the name of a predefined interval of each recurrence.
+                      The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
+                      If empty, the corresponding override happens only once.
                     enum:
                       - Daily
                       - Weekly
@@ -202,7 +260,9 @@ spec:
                       - Yearly
                     type: string
                   untilTime:
-                    description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever.
+                    description: |-
+                      UntilTime is the time of the final recurrence.
+                      If empty, the schedule recurs forever.
                     format: date-time
                     type: string
                 type: object
@@ -231,18 +291,24 @@ spec:
             type: object
           type: array
         desiredReplicas:
-          description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
+          description: |-
+            DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
+            This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
           type: integer
         lastSuccessfulScaleOutTime:
           format: date-time
           nullable: true
           type: string
         observedGeneration:
-          description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server.
+          description: |-
+            ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
+            RunnerDeployment's generation, which is updated on mutation by the API Server.
           format: int64
           type: integer
         scheduledOverridesSummary:
-          description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability.
+          description: |-
+            ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
+            for observability.
           type: string
       type: object
     type: object

View File

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.7.0
+version: 0.9.2

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.7.0"
+appVersion: "0.9.2"

 home: https://github.com/actions/actions-runner-controller

View File

@@ -2,3 +2,4 @@ Thank you for installing {{ .Chart.Name }}.
 Your release is named {{ .Release.Name }}.

+WARNING: Older version of the listener (githubrunnerscalesetlistener) is deprecated and will be removed in the future gha-runner-scale-set-0.10.0 release. If you are using environment variable override to force the old listener, please remove the environment variable and use the new listener (ghalistener) instead.

View File

@@ -126,7 +126,3 @@ Create the name of the service account to use
 {{- end }}
 {{- $names | join ","}}
 {{- end }}
-
-{{- define "gha-runner-scale-set-controller.serviceMonitorName" -}}
-{{- include "gha-runner-scale-set-controller.fullname" . }}-service-monitor
-{{- end }}

View File

@@ -110,10 +110,16 @@ spec:
         volumeMounts:
           - mountPath: /tmp
             name: tmp
+          {{- range .Values.volumeMounts }}
+          - {{ toYaml . | nindent 10 }}
+          {{- end }}
       terminationGracePeriodSeconds: 10
       volumes:
         - name: tmp
           emptyDir: {}
+        {{- range .Values.volumes }}
+        - {{ toYaml . | nindent 8 }}
+        {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}
@@ -122,6 +128,10 @@ spec:
       affinity:
         {{- toYaml . | nindent 8 }}
       {{- end }}
+      {{- with .Values.topologySpreadConstraints }}
+      topologySpreadConstraints:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.tolerations }}
       tolerations:
         {{- toYaml . | nindent 8 }}

View File

@@ -345,6 +345,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
     assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
     assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
+    assert.Len(t, deployment.Spec.Template.Spec.TopologySpreadConstraints, 0)
     assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)

     managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"
@@ -424,10 +425,17 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"tolerations[0].key": "foo", "tolerations[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo", "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar", "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
"priorityClassName": "test-priority-class", "topologySpreadConstraints[0].labelSelector.matchLabels.foo": "bar",
"flags.updateStrategy": "eventual", "topologySpreadConstraints[0].maxSkew": "1",
"flags.logLevel": "info", "topologySpreadConstraints[0].topologyKey": "foo",
"flags.logFormat": "json", "priorityClassName": "test-priority-class",
"flags.updateStrategy": "eventual",
"flags.logLevel": "info",
"flags.logFormat": "json",
"volumes[0].name": "customMount",
"volumes[0].configMap.name": "my-configmap",
"volumeMounts[0].name": "customMount",
"volumeMounts[0].mountPath": "/my/mount/path",
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
@@ -470,9 +478,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
     assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
     assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
     assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
-    assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
+    assert.Len(t, deployment.Spec.Template.Spec.Volumes, 2)
     assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
-    assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
+    assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
+    assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Volumes[1].Name)
+    assert.Equal(t, "my-configmap", deployment.Spec.Template.Spec.Volumes[1].ConfigMap.Name)

     assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
     assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
@@ -481,6 +491,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key) assert.Equal(t, "foo", deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key)
assert.Equal(t, "bar", string(deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator)) assert.Equal(t, "bar", string(deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator))
assert.Len(t, deployment.Spec.Template.Spec.TopologySpreadConstraints, 1)
assert.Equal(t, "bar", deployment.Spec.Template.Spec.TopologySpreadConstraints[0].LabelSelector.MatchLabels["foo"])
assert.Equal(t, int32(1), deployment.Spec.Template.Spec.TopologySpreadConstraints[0].MaxSkew)
assert.Equal(t, "foo", deployment.Spec.Template.Spec.TopologySpreadConstraints[0].TopologyKey)
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1) assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key) assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
@@ -521,9 +536,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
     assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
     assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
-    assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
+    assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 2)
     assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
     assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
+    assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
+    assert.Equal(t, "/my/mount/path", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
 }

 func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
@@ -737,6 +754,7 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
     assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
     assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
+    assert.Len(t, deployment.Spec.Template.Spec.TopologySpreadConstraints, 0)
     assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)

     managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev"

View File

@@ -72,14 +72,20 @@ tolerations: []
 affinity: {}

+topologySpreadConstraints: []
+
+# Mount volumes in the container.
+volumes: []
+volumeMounts: []
+
 # Leverage a PriorityClass to ensure your pods survive resource shortages
 # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
 # PriorityClass: system-cluster-critical
 priorityClassName: ""

 ## If `metrics:` object is not provided, or commented out, the following flags
 ## will be applied the controller-manager and listener pods with empty values:
 ## `--metrics-addr`, `--listener-metrics-addr`, `--listener-metrics-endpoint`.
 ## This will disable metrics.
 ##
 ## To enable metrics, uncomment the following lines.

View File

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.7.0
+version: 0.9.2

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.7.0"
+appVersion: "0.9.2"

 home: https://github.com/actions/actions-runner-controller

View File

@@ -99,7 +99,7 @@ volumeMounts:
   image: docker:dind
   args:
     - dockerd
-    - --host=unix:///run/docker/docker.sock
+    - --host=unix:///var/run/docker.sock
     - --group=$(DOCKER_GROUP_GID)
   env:
     - name: DOCKER_GROUP_GID
@@ -110,7 +110,7 @@ volumeMounts:
     - name: work
       mountPath: /home/runner/_work
     - name: dind-sock
-      mountPath: /run/docker
+      mountPath: /var/run
     - name: dind-externals
       mountPath: /home/runner/externals
 {{- end }}
@@ -223,7 +223,7 @@ env:
 {{- end }}
 {{- if $setDockerHost }}
   - name: DOCKER_HOST
-    value: unix:///run/docker/docker.sock
+    value: unix:///var/run/docker.sock
 {{- end }}
 {{- if $setRunnerWaitDocker }}
   - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
@@ -264,8 +264,7 @@ volumeMounts:
 {{- end }}
 {{- if $mountDindCert }}
   - name: dind-sock
-    mountPath: /run/docker
-    readOnly: true
+    mountPath: /var/run
 {{- end }}
 {{- if $mountGitHubServerTLS }}
   - name: github-server-tls-cert
@@ -385,6 +384,9 @@ volumeMounts:
 {{- $setNodeExtraCaCerts = 1 }}
 {{- $setRunnerUpdateCaCerts = 1 }}
 {{- end }}
+{{- $mountGitHubServerTLS := 0 }}
+{{- if or $container.env $setNodeExtraCaCerts $setRunnerUpdateCaCerts }}
 env:
   {{- with $container.env }}
   {{- range $i, $env := . }}
@@ -405,10 +407,12 @@ volumeMounts:
   - name: RUNNER_UPDATE_CA_CERTS
     value: "1"
 {{- end }}
-{{- $mountGitHubServerTLS := 0 }}
 {{- if $tlsConfig.runnerMountPath }}
   {{- $mountGitHubServerTLS = 1 }}
 {{- end }}
+{{- end }}
+{{- if or $container.volumeMounts $mountGitHubServerTLS }}
 volumeMounts:
   {{- with $container.volumeMounts }}
   {{- range $i, $volMount := . }}
@@ -423,6 +427,7 @@ volumeMounts:
     mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
     subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
   {{- end }}
+  {{- end}}
 {{- end }}
 {{- end }}
 {{- end }}
@@ -520,13 +525,13 @@ volumeMounts:
 {{- end }}
 {{- end }}
 {{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
-  {{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+  {{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
-  {{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+  {{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if gt $multiNamespacesCounter 1 }}
-  {{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+  {{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if eq $multiNamespacesCounter 1 }}
   {{- with $controllerDeployment.metadata }}
@@ -539,11 +544,11 @@ volumeMounts:
     {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
   {{- end }}
 {{- else }}
-  {{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+  {{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- end }}
 {{- if eq $managerServiceAccountNamespace "" }}
-  {{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+  {{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- $managerServiceAccountNamespace }}
 {{- end }}


@@ -13,6 +13,7 @@ metadata:
app.kubernetes.io/component: "autoscaling-runner-set" app.kubernetes.io/component: "autoscaling-runner-set"
{{- include "gha-runner-scale-set.labels" . | nindent 4 }} {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations: annotations:
actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
{{- $containerMode := .Values.containerMode }} {{- $containerMode := .Values.containerMode }}
{{- if not (kindIs "string" .Values.githubConfigSecret) }} {{- if not (kindIs "string" .Values.githubConfigSecret) }}
actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }} actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}


@@ -900,7 +900,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS") assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "unix:///run/docker/docker.sock", ars.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, "unix:///var/run/docker.sock", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS", ars.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "120", ars.Spec.Template.Spec.Containers[0].Env[1].Value) assert.Equal(t, "120", ars.Spec.Template.Spec.Containers[0].Env[1].Value)
@@ -910,8 +910,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.False(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].ReadOnly) assert.False(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].ReadOnly)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name) assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
assert.Equal(t, "/run/docker", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath) assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
assert.True(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].ReadOnly)
assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name) assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image) assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
@@ -921,7 +920,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath) assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name) assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
assert.Equal(t, "/run/docker", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath) assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name) assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath) assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)
@@ -2017,3 +2016,130 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation)) assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
} }
} }
func TestRunnerContainerEnvNotEmptyMap(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
type testModel struct {
Spec struct {
Template struct {
Spec struct {
Containers []map[string]any `yaml:"containers"`
} `yaml:"spec"`
} `yaml:"template"`
} `yaml:"spec"`
}
var m testModel
helm.UnmarshalK8SYaml(t, output, &m)
_, ok := m.Spec.Template.Spec.Containers[0]["env"]
assert.False(t, ok, "env should not be set")
}
func TestRunnerContainerVolumeNotEmptyMap(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
type testModel struct {
Spec struct {
Template struct {
Spec struct {
Containers []map[string]any `yaml:"containers"`
} `yaml:"spec"`
} `yaml:"template"`
} `yaml:"spec"`
}
var m testModel
helm.UnmarshalK8SYaml(t, output, &m)
_, ok := m.Spec.Template.Spec.Containers[0]["volumeMounts"]
assert.False(t, ok, "volumeMounts should not be set")
}
func TestAutoscalingRunnerSetAnnotationValuesHash(t *testing.T) {
t.Parallel()
const valuesHash = "actions.github.com/values-hash"
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
firstHash := autoscalingRunnerSet.Annotations[valuesHash]
assert.NotEmpty(t, firstHash)
assert.LessOrEqual(t, len(firstHash), 63)
helmChartPath, err = filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
options = &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token1234567890",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
secondHash := autoscalingRunnerSet.Annotations[valuesHash]
assert.NotEmpty(t, secondHash)
assert.NotEqual(t, firstHash, secondHash)
assert.LessOrEqual(t, len(secondHash), 63)
}


@@ -39,7 +39,8 @@ githubConfigSecret:
## maxRunners is the max number of runners the autoscaling runner set will scale up to. ## maxRunners is the max number of runners the autoscaling runner set will scale up to.
# maxRunners: 5 # maxRunners: 5
## minRunners is the min number of runners the autoscaling runner set will scale down to. ## minRunners is the min number of idle runners. The target number of runners created will be
## calculated as a sum of minRunners and the number of jobs assigned to the scale set.
# minRunners: 0 # minRunners: 0
# runnerGroup: "default" # runnerGroup: "default"
@@ -87,7 +88,7 @@ githubConfigSecret:
# kubernetesModeServiceAccount: # kubernetesModeServiceAccount:
# annotations: # annotations:
## template is the PodSpec for each listener Pod ## listenerTemplate is the PodSpec for each listener Pod
## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec ## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
# listenerTemplate: # listenerTemplate:
# spec: # spec:
@@ -124,18 +125,17 @@ template:
## command: ["/home/runner/run.sh"] ## command: ["/home/runner/run.sh"]
## env: ## env:
## - name: DOCKER_HOST ## - name: DOCKER_HOST
## value: unix:///run/docker/docker.sock ## value: unix:///var/run/docker.sock
## volumeMounts: ## volumeMounts:
## - name: work ## - name: work
## mountPath: /home/runner/_work ## mountPath: /home/runner/_work
## - name: dind-sock ## - name: dind-sock
## mountPath: /run/docker ## mountPath: /var/run
## readOnly: true
## - name: dind ## - name: dind
## image: docker:dind ## image: docker:dind
## args: ## args:
## - dockerd ## - dockerd
## - --host=unix:///run/docker/docker.sock ## - --host=unix:///var/run/docker.sock
## - --group=$(DOCKER_GROUP_GID) ## - --group=$(DOCKER_GROUP_GID)
## env: ## env:
## - name: DOCKER_GROUP_GID ## - name: DOCKER_GROUP_GID
@@ -146,7 +146,7 @@ template:
## - name: work ## - name: work
## mountPath: /home/runner/_work ## mountPath: /home/runner/_work
## - name: dind-sock ## - name: dind-sock
## mountPath: /run/docker ## mountPath: /var/run
## - name: dind-externals ## - name: dind-externals
## mountPath: /home/runner/externals ## mountPath: /home/runner/externals
## volumes: ## volumes:

cmd/ghalistener/app/app.go Normal file

@@ -0,0 +1,137 @@
package app
import (
"context"
"errors"
"fmt"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"golang.org/x/sync/errgroup"
)
// App is responsible for initializing required components and running the app.
type App struct {
// configured fields
config config.Config
logger logr.Logger
// initialized fields
listener Listener
worker Worker
metrics metrics.ServerPublisher
}
//go:generate mockery --name Listener --output ./mocks --outpkg mocks --case underscore
type Listener interface {
Listen(ctx context.Context, handler listener.Handler) error
}
//go:generate mockery --name Worker --output ./mocks --outpkg mocks --case underscore
type Worker interface {
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error)
}
func New(config config.Config) (*App, error) {
app := &App{
config: config,
}
ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse GitHub config from URL: %w", err)
}
{
logger, err := config.Logger()
if err != nil {
return nil, fmt.Errorf("failed to create logger: %w", err)
}
app.logger = logger.WithName("listener-app")
}
actionsClient, err := config.ActionsClient(app.logger)
if err != nil {
return nil, fmt.Errorf("failed to create actions client: %w", err)
}
if config.MetricsAddr != "" {
app.metrics = metrics.NewExporter(metrics.ExporterConfig{
ScaleSetName: config.EphemeralRunnerSetName,
ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
Enterprise: ghConfig.Enterprise,
Organization: ghConfig.Organization,
Repository: ghConfig.Repository,
ServerAddr: config.MetricsAddr,
ServerEndpoint: config.MetricsEndpoint,
})
}
worker, err := worker.New(
worker.Config{
EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
EphemeralRunnerSetName: config.EphemeralRunnerSetName,
MaxRunners: config.MaxRunners,
MinRunners: config.MinRunners,
},
worker.WithLogger(app.logger.WithName("worker")),
)
if err != nil {
return nil, fmt.Errorf("failed to create new kubernetes worker: %w", err)
}
app.worker = worker
listener, err := listener.New(listener.Config{
Client: actionsClient,
ScaleSetID: app.config.RunnerScaleSetId,
MinRunners: app.config.MinRunners,
MaxRunners: app.config.MaxRunners,
Logger: app.logger.WithName("listener"),
Metrics: app.metrics,
})
if err != nil {
return nil, fmt.Errorf("failed to create new listener: %w", err)
}
app.listener = listener
app.logger.Info("app initialized")
return app, nil
}
func (app *App) Run(ctx context.Context) error {
var errs []error
if app.worker == nil {
errs = append(errs, fmt.Errorf("worker not initialized"))
}
if app.listener == nil {
errs = append(errs, fmt.Errorf("listener not initialized"))
}
if err := errors.Join(errs...); err != nil {
return fmt.Errorf("app not initialized: %w", err)
}
g, ctx := errgroup.WithContext(ctx)
metricsCtx, cancelMetrics := context.WithCancelCause(ctx)
g.Go(func() error {
app.logger.Info("Starting listener")
listenerErr := app.listener.Listen(ctx, app.worker)
cancelMetrics(fmt.Errorf("listener exited: %w", listenerErr))
return listenerErr
})
if app.metrics != nil {
g.Go(func() error {
app.logger.Info("Starting metrics server")
return app.metrics.ListenAndServe(metricsCtx)
})
}
return g.Wait()
}
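
A minimal sketch of how this package is expected to be wired together by the listener binary, assuming an entrypoint similar to cmd/ghalistener/main.go (not shown in this diff); the LISTENER_CONFIG_PATH environment variable is a placeholder, not necessarily what the real binary reads.

package main

import (
    "context"
    "log"
    "os"
    "os/signal"
    "syscall"

    "github.com/actions/actions-runner-controller/cmd/ghalistener/app"
    "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
)

func main() {
    // Placeholder: read the config file path from an environment variable.
    cfg, err := config.Read(os.Getenv("LISTENER_CONFIG_PATH"))
    if err != nil {
        log.Fatalf("failed to read config: %v", err)
    }

    listenerApp, err := app.New(cfg)
    if err != nil {
        log.Fatalf("failed to initialize app: %v", err)
    }

    // Stop the app gracefully on SIGINT/SIGTERM.
    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    if err := listenerApp.Run(ctx); err != nil {
        log.Fatalf("listener app exited with error: %v", err)
    }
}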


@@ -0,0 +1,85 @@
package app
import (
"context"
"errors"
"testing"
appmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/app/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
metricsMocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
func TestApp_Run(t *testing.T) {
t.Parallel()
t.Run("ListenerWorkerGuard", func(t *testing.T) {
invalidApps := []*App{
{},
{worker: &worker.Worker{}},
{listener: &listener.Listener{}},
}
for _, app := range invalidApps {
assert.Error(t, app.Run(context.Background()))
}
})
t.Run("ExitsOnListenerError", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
listener.On("Listen", mock.Anything, mock.Anything).Return(errors.New("listener error")).Once()
app := &App{
listener: listener,
worker: worker,
}
err := app.Run(context.Background())
assert.Error(t, err)
})
t.Run("ExitsOnListenerNil", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
listener.On("Listen", mock.Anything, mock.Anything).Return(nil).Once()
app := &App{
listener: listener,
worker: worker,
}
err := app.Run(context.Background())
assert.NoError(t, err)
})
t.Run("CancelListenerOnMetricsServerError", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
metrics := metricsMocks.NewServerPublisher(t)
ctx := context.Background()
listener.On("Listen", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
ctx := args.Get(0).(context.Context)
go func() {
<-ctx.Done()
}()
}).Return(nil).Once()
metrics.On("ListenAndServe", mock.Anything).Return(errors.New("metrics server error")).Once()
app := &App{
listener: listener,
worker: worker,
metrics: metrics,
}
err := app.Run(ctx)
assert.Error(t, err)
})
}


@@ -0,0 +1,43 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
mock "github.com/stretchr/testify/mock"
)
// Listener is an autogenerated mock type for the Listener type
type Listener struct {
mock.Mock
}
// Listen provides a mock function with given fields: ctx, handler
func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
ret := _m.Called(ctx, handler)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, listener.Handler) error); ok {
r0 = rf(ctx, handler)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewListener creates a new instance of Listener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewListener(t interface {
mock.TestingT
Cleanup(func())
}) *Listener {
mock := &Listener{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,68 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
actions "github.com/actions/actions-runner-controller/github/actions"
context "context"
mock "github.com/stretchr/testify/mock"
)
// Worker is an autogenerated mock type for the Worker type
type Worker struct {
mock.Mock
}
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
ret := _m.Called(ctx, count, acquireCount)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
return rf(ctx, count, acquireCount)
}
if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
r0 = rf(ctx, count, acquireCount)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
r1 = rf(ctx, count, acquireCount)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ret := _m.Called(ctx, jobInfo)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
r0 = rf(ctx, jobInfo)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewWorker creates a new instance of Worker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewWorker(t interface {
mock.TestingT
Cleanup(func())
}) *Worker {
mock := &Worker{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,161 @@
package config
import (
"crypto/x509"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/go-logr/logr"
"golang.org/x/net/http/httpproxy"
)
type Config struct {
ConfigureUrl string `json:"configureUrl"`
AppID int64 `json:"appID"`
AppInstallationID int64 `json:"appInstallationID"`
AppPrivateKey string `json:"appPrivateKey"`
Token string `json:"token"`
EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName"`
MaxRunners int `json:"maxRunners"`
MinRunners int `json:"minRunners"`
RunnerScaleSetId int `json:"runnerScaleSetId"`
RunnerScaleSetName string `json:"runnerScaleSetName"`
ServerRootCA string `json:"serverRootCA"`
LogLevel string `json:"logLevel"`
LogFormat string `json:"logFormat"`
MetricsAddr string `json:"metricsAddr"`
MetricsEndpoint string `json:"metricsEndpoint"`
}
func Read(path string) (Config, error) {
f, err := os.Open(path)
if err != nil {
return Config{}, err
}
defer f.Close()
var config Config
if err := json.NewDecoder(f).Decode(&config); err != nil {
return Config{}, fmt.Errorf("failed to decode config: %w", err)
}
if err := config.validate(); err != nil {
return Config{}, fmt.Errorf("failed to validate config: %w", err)
}
return config, nil
}
func (c *Config) validate() error {
if len(c.ConfigureUrl) == 0 {
return fmt.Errorf("GitHubConfigUrl is not provided")
}
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
}
if c.RunnerScaleSetId == 0 {
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
}
if c.MaxRunners < c.MinRunners {
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
}
hasToken := len(c.Token) > 0
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""
if !hasToken && !hasPrivateKeyConfig {
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
if hasToken && hasPrivateKeyConfig {
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
return nil
}
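// Illustrative only: an example of the JSON document Read expects, derived from
// the json tags on Config above. Values are placeholders; per validate, provide
// either token or the GitHub App fields (appID, appInstallationID, appPrivateKey),
// but not both.
const exampleConfigJSON = `{
  "configureUrl": "https://github.com/myorg/myrepo",
  "token": "ghp_placeholder",
  "ephemeralRunnerSetNamespace": "arc-runners",
  "ephemeralRunnerSetName": "my-runner-set",
  "runnerScaleSetId": 1,
  "minRunners": 0,
  "maxRunners": 5,
  "metricsAddr": ":8080",
  "metricsEndpoint": "/metrics"
}`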
func (c *Config) Logger() (logr.Logger, error) {
logLevel := string(logging.LogLevelDebug)
if c.LogLevel != "" {
logLevel = c.LogLevel
}
logFormat := string(logging.LogFormatText)
if c.LogFormat != "" {
logFormat = c.LogFormat
}
logger, err := logging.NewLogger(logLevel, logFormat)
if err != nil {
return logr.Logger{}, fmt.Errorf("NewLogger failed: %w", err)
}
return logger, nil
}
func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.ClientOption) (*actions.Client, error) {
var creds actions.ActionsAuth
switch c.Token {
case "":
creds.AppCreds = &actions.GitHubAppAuth{
AppID: c.AppID,
AppInstallationID: c.AppInstallationID,
AppPrivateKey: c.AppPrivateKey,
}
default:
creds.Token = c.Token
}
options := append([]actions.ClientOption{
actions.WithLogger(logger),
}, clientOptions...)
if c.ServerRootCA != "" {
systemPool, err := x509.SystemCertPool()
if err != nil {
return nil, fmt.Errorf("failed to load system cert pool: %w", err)
}
pool := systemPool.Clone()
ok := pool.AppendCertsFromPEM([]byte(c.ServerRootCA))
if !ok {
return nil, fmt.Errorf("failed to parse root certificate")
}
options = append(options, actions.WithRootCAs(pool))
}
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) {
return proxyFunc(req.URL)
}))
client, err := actions.NewClient(c.ConfigureUrl, &creds, options...)
if err != nil {
return nil, fmt.Errorf("failed to create actions client: %w", err)
}
client.SetUserAgent(actions.UserAgentInfo{
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: c.RunnerScaleSetId,
HasProxy: hasProxy(),
Subsystem: "ghalistener",
})
return client, nil
}
func hasProxy() bool {
// ProxyFunc never returns a nil function, so inspect the proxy environment configuration directly.
envProxy := httpproxy.FromEnvironment()
return envProxy.HTTPProxy != "" || envProxy.HTTPSProxy != ""
}


@@ -0,0 +1,161 @@
package config_test
import (
"context"
"crypto/tls"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/testserver"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCustomerServerRootCA(t *testing.T) {
ctx := context.Background()
certsFolder := filepath.Join(
"../../../",
"github",
"actions",
"testdata",
)
certPath := filepath.Join(certsFolder, "server.crt")
keyPath := filepath.Join(certsFolder, "server.key")
serverCalledSuccessfully := false
server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
serverCalledSuccessfully = true
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"count": 0}`))
}))
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
require.NoError(t, err)
server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
server.StartTLS()
var certsString string
rootCA, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt"))
require.NoError(t, err)
certsString = string(rootCA)
intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.pem"))
require.NoError(t, err)
certsString = certsString + string(intermediate)
config := config.Config{
ConfigureUrl: server.ConfigURLForOrg("myorg"),
ServerRootCA: certsString,
Token: "token",
}
client, err := config.ActionsClient(logr.Discard())
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, "test")
require.NoError(t, err)
assert.True(t, serverCalledSuccessfully)
}
func TestProxySettings(t *testing.T) {
t.Run("http", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
defer os.Setenv("http_proxy", prevProxy)
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
}
client, err := config.ActionsClient(logr.Discard())
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.True(t, wentThroughProxy)
})
t.Run("https", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("https_proxy")
os.Setenv("https_proxy", proxy.URL)
defer os.Setenv("https_proxy", prevProxy)
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
}
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
// proxy doesn't support https
assert.Error(t, err)
assert.True(t, wentThroughProxy)
})
t.Run("no_proxy", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
defer os.Setenv("http_proxy", prevProxy)
prevNoProxy := os.Getenv("no_proxy")
os.Setenv("no_proxy", "example.com")
defer os.Setenv("no_proxy", prevNoProxy)
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
}
client, err := config.ActionsClient(logr.Discard())
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.False(t, wentThroughProxy)
})
}


@@ -0,0 +1,92 @@
package config
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
Token: "token",
}
err := config.validate()
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
Token: "asdf",
}
err := config.validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}


@@ -0,0 +1,452 @@
package listener
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"time"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/google/uuid"
)
const (
sessionCreationMaxRetries = 10
)
// message types
const (
messageTypeJobAvailable = "JobAvailable"
messageTypeJobAssigned = "JobAssigned"
messageTypeJobStarted = "JobStarted"
messageTypeJobCompleted = "JobCompleted"
)
//go:generate mockery --name Client --output ./mocks --outpkg mocks --case underscore
type Client interface {
GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error)
CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error)
GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error)
DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error)
DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error
}
type Config struct {
Client Client
ScaleSetID int
MinRunners int
MaxRunners int
Logger logr.Logger
Metrics metrics.Publisher
}
func (c *Config) Validate() error {
if c.Client == nil {
return errors.New("client is required")
}
if c.ScaleSetID == 0 {
return errors.New("scaleSetID is required")
}
if c.MinRunners < 0 {
return errors.New("minRunners must be greater than or equal to 0")
}
if c.MaxRunners < 0 {
return errors.New("maxRunners must be greater than or equal to 0")
}
if c.MaxRunners > 0 && c.MinRunners > c.MaxRunners {
return errors.New("minRunners must be less than or equal to maxRunners")
}
return nil
}
// The Listener's role is to manage all interactions with the actions service.
// It receives messages and processes them using the given handler.
type Listener struct {
// configured fields
scaleSetID int // The ID of the scale set associated with the listener.
client Client // The client used to interact with the scale set.
metrics metrics.Publisher // The publisher used to publish metrics.
// internal fields
logger logr.Logger // The logger used for logging.
hostname string // The hostname of the listener.
// updated fields
lastMessageID int64 // The ID of the last processed message.
maxCapacity int // The maximum number of runners that can be created.
session *actions.RunnerScaleSetSession // The session for managing the runner scale set.
}
func New(config Config) (*Listener, error) {
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("invalid config: %w", err)
}
listener := &Listener{
scaleSetID: config.ScaleSetID,
client: config.Client,
logger: config.Logger,
metrics: metrics.Discard,
maxCapacity: config.MaxRunners,
}
if config.Metrics != nil {
listener.metrics = config.Metrics
}
listener.metrics.PublishStatic(config.MinRunners, config.MaxRunners)
hostname, err := os.Hostname()
if err != nil {
hostname = uuid.NewString()
listener.logger.Info("Failed to get hostname, fallback to uuid", "uuid", hostname, "error", err)
}
listener.hostname = hostname
return listener, nil
}
//go:generate mockery --name Handler --output ./mocks --outpkg mocks --case underscore
type Handler interface {
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error)
}
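// Illustrative only: a minimal Handler implementation satisfying the interface
// above. The real handler in this module is the worker package's Worker; this
// sketch simply logs what it receives and reports the assigned-job count back
// as the desired runner count (an assumption, not the worker's actual policy).
type loggingHandler struct {
    logger logr.Logger
}

func (h *loggingHandler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
    h.logger.Info("job started", "requestId", jobInfo.RunnerRequestId)
    return nil
}

func (h *loggingHandler) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
    h.logger.Info("desired runner count requested", "assignedJobs", count, "jobsCompleted", jobsCompleted)
    return count, nil
}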
// Listen listens for incoming messages and handles them using the provided handler.
// It continuously listens for messages until the context is cancelled.
// The initial message contains the current statistics and acquirable jobs, if any.
// The handler is responsible for handling the initial message and subsequent messages.
// If an error occurs during any step, Listen returns an error.
func (l *Listener) Listen(ctx context.Context, handler Handler) error {
if err := l.createSession(ctx); err != nil {
return fmt.Errorf("createSession failed: %w", err)
}
defer func() {
if err := l.deleteMessageSession(); err != nil {
l.logger.Error(err, "failed to delete message session")
}
}()
initialMessage := &actions.RunnerScaleSetMessage{
MessageId: 0,
MessageType: "RunnerScaleSetJobMessages",
Statistics: l.session.Statistics,
Body: "",
}
if l.session.Statistics == nil {
return fmt.Errorf("session statistics is nil")
}
l.metrics.PublishStatistics(initialMessage.Statistics)
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs, 0)
if err != nil {
return fmt.Errorf("handling initial message failed: %w", err)
}
l.metrics.PublishDesiredRunners(desiredRunners)
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
msg, err := l.getMessage(ctx)
if err != nil {
return fmt.Errorf("failed to get message: %w", err)
}
if msg == nil {
_, err := handler.HandleDesiredRunnerCount(ctx, 0, 0)
if err != nil {
return fmt.Errorf("handling nil message failed: %w", err)
}
continue
}
// Remove cancellation from the context to avoid cancelling the message handling.
if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil {
return fmt.Errorf("failed to handle message: %w", err)
}
}
}
func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
parsedMsg, err := l.parseMessage(ctx, msg)
if err != nil {
return fmt.Errorf("failed to parse message: %w", err)
}
l.metrics.PublishStatistics(parsedMsg.statistics)
if len(parsedMsg.jobsAvailable) > 0 {
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
if err != nil {
return fmt.Errorf("failed to acquire jobs: %w", err)
}
l.logger.Info("Jobs are acquired", "count", len(acquiredJobIDs), "requestIds", fmt.Sprint(acquiredJobIDs))
}
for _, jobCompleted := range parsedMsg.jobsCompleted {
l.metrics.PublishJobCompleted(jobCompleted)
}
l.lastMessageID = msg.MessageId
if err := l.deleteLastMessage(ctx); err != nil {
return fmt.Errorf("failed to delete message: %w", err)
}
for _, jobStarted := range parsedMsg.jobsStarted {
if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
return fmt.Errorf("failed to handle job started: %w", err)
}
l.metrics.PublishJobStarted(jobStarted)
}
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs, len(parsedMsg.jobsCompleted))
if err != nil {
return fmt.Errorf("failed to handle desired runner count: %w", err)
}
l.metrics.PublishDesiredRunners(desiredRunners)
return nil
}
func (l *Listener) createSession(ctx context.Context) error {
var session *actions.RunnerScaleSetSession
var retries int
for {
var err error
session, err = l.client.CreateMessageSession(ctx, l.scaleSetID, l.hostname)
if err == nil {
break
}
clientErr := &actions.HttpClientSideError{}
if !errors.As(err, &clientErr) {
return fmt.Errorf("failed to create session: %w", err)
}
if clientErr.Code != http.StatusConflict {
return fmt.Errorf("failed to create session: %w", err)
}
retries++
if retries >= sessionCreationMaxRetries {
return fmt.Errorf("failed to create session after %d retries: %w", retries, err)
}
l.logger.Info("Unable to create message session. Will try again in 30 seconds", "error", err.Error())
select {
case <-ctx.Done():
return fmt.Errorf("context cancelled: %w", ctx.Err())
case <-time.After(30 * time.Second):
}
}
statistics, err := json.Marshal(session.Statistics)
if err != nil {
return fmt.Errorf("failed to marshal statistics: %w", err)
}
l.logger.Info("Current runner scale set statistics.", "statistics", string(statistics))
l.session = session
return nil
}
func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
if err == nil { // if NO error
return msg, nil
}
expiredError := &actions.MessageQueueTokenExpiredError{}
if !errors.As(err, &expiredError) {
return nil, fmt.Errorf("failed to get next message: %w", err)
}
if err := l.refreshSession(ctx); err != nil {
return nil, err
}
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
if err != nil {
return nil, fmt.Errorf("failed to get next message after message session refresh: %w", err)
}
return msg, nil
}
func (l *Listener) deleteLastMessage(ctx context.Context) error {
l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
if err == nil { // if NO error
return nil
}
expiredError := &actions.MessageQueueTokenExpiredError{}
if !errors.As(err, &expiredError) {
return fmt.Errorf("failed to delete last message: %w", err)
}
if err := l.refreshSession(ctx); err != nil {
return err
}
err = l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
if err != nil {
return fmt.Errorf("failed to delete last message after message session refresh: %w", err)
}
return nil
}
type parsedMessage struct {
statistics *actions.RunnerScaleSetStatistic
jobsStarted []*actions.JobStarted
jobsAvailable []*actions.JobAvailable
jobsCompleted []*actions.JobCompleted
}
func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
if msg.MessageType != "RunnerScaleSetJobMessages" {
l.logger.Info("Skipping message", "messageType", msg.MessageType)
return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
}
l.logger.Info("Processing message", "messageId", msg.MessageId, "messageType", msg.MessageType)
if msg.Statistics == nil {
return nil, fmt.Errorf("invalid message: statistics is nil")
}
l.logger.Info("New runner scale set statistics.", "statistics", msg.Statistics)
var batchedMessages []json.RawMessage
if len(msg.Body) > 0 {
if err := json.Unmarshal([]byte(msg.Body), &batchedMessages); err != nil {
return nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
}
}
parsedMsg := &parsedMessage{
statistics: msg.Statistics,
}
for _, msg := range batchedMessages {
var messageType actions.JobMessageType
if err := json.Unmarshal(msg, &messageType); err != nil {
return nil, fmt.Errorf("failed to decode job message type: %w", err)
}
switch messageType.MessageType {
case messageTypeJobAvailable:
var jobAvailable actions.JobAvailable
if err := json.Unmarshal(msg, &jobAvailable); err != nil {
return nil, fmt.Errorf("failed to decode job available: %w", err)
}
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
case messageTypeJobAssigned:
var jobAssigned actions.JobAssigned
if err := json.Unmarshal(msg, &jobAssigned); err != nil {
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
}
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
case messageTypeJobStarted:
var jobStarted actions.JobStarted
if err := json.Unmarshal(msg, &jobStarted); err != nil {
return nil, fmt.Errorf("could not decode job started message. %w", err)
}
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
case messageTypeJobCompleted:
var jobCompleted actions.JobCompleted
if err := json.Unmarshal(msg, &jobCompleted); err != nil {
return nil, fmt.Errorf("failed to decode job completed: %w", err)
}
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
default:
l.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
}
}
return parsedMsg, nil
}
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
ids := make([]int64, 0, len(jobsAvailable))
for _, job := range jobsAvailable {
ids = append(ids, job.RunnerRequestId)
}
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
idsAcquired, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
if err == nil { // if NO errors
return idsAcquired, nil
}
expiredError := &actions.MessageQueueTokenExpiredError{}
if !errors.As(err, &expiredError) {
return nil, fmt.Errorf("failed to acquire jobs: %w", err)
}
if err := l.refreshSession(ctx); err != nil {
return nil, err
}
idsAcquired, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
if err != nil {
return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
}
return idsAcquired, nil
}
func (l *Listener) refreshSession(ctx context.Context) error {
l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
if err != nil {
return fmt.Errorf("refresh message session failed. %w", err)
}
l.session = session
return nil
}
func (l *Listener) deleteMessageSession() error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
l.logger.Info("Deleting message session")
if err := l.client.DeleteMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId); err != nil {
return fmt.Errorf("failed to delete message session: %w", err)
}
return nil
}


@@ -0,0 +1,970 @@
package listener
import (
"context"
"encoding/json"
"errors"
"net/http"
"testing"
"time"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestNew(t *testing.T) {
t.Parallel()
t.Run("InvalidConfig", func(t *testing.T) {
t.Parallel()
var config Config
_, err := New(config)
assert.NotNil(t, err)
})
t.Run("ValidConfig", func(t *testing.T) {
t.Parallel()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics.Discard,
}
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
})
}
func TestListener_createSession(t *testing.T) {
t.Parallel()
t.Run("FailOnce", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.createSession(ctx)
assert.NotNil(t, err)
})
t.Run("FailContext", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil,
&actions.HttpClientSideError{Code: http.StatusConflict}).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.createSession(ctx)
assert.True(t, errors.Is(err, context.DeadlineExceeded))
})
t.Run("SetsSession", func(t *testing.T) {
t.Parallel()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.createSession(context.Background())
assert.Nil(t, err)
assert.Equal(t, session, l.session)
})
}
func TestListener_getMessage(t *testing.T) {
t.Parallel()
t.Run("ReceivesMessage", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
MaxRunners: 10,
}
client := listenermocks.NewClient(t)
want := &actions.RunnerScaleSetMessage{
MessageId: 1,
}
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
got, err := l.getMessage(ctx)
assert.Nil(t, err)
assert.Equal(t, want, got)
})
t.Run("NotExpiredError", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
MaxRunners: 10,
}
client := listenermocks.NewClient(t)
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
_, err = l.getMessage(ctx)
assert.NotNil(t, err)
})
t.Run("RefreshAndSucceeds", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
MaxRunners: 10,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
want := &actions.RunnerScaleSetMessage{
MessageId: 1,
}
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
got, err := l.getMessage(ctx)
assert.Nil(t, err)
assert.Equal(t, want, got)
})
t.Run("RefreshAndFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
MaxRunners: 10,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
got, err := l.getMessage(ctx)
assert.NotNil(t, err)
assert.Nil(t, got)
})
}
func TestListener_refreshSession(t *testing.T) {
t.Parallel()
t.Run("SuccessfullyRefreshes", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
newUUID := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &newUUID,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
err = l.refreshSession(ctx)
assert.Nil(t, err)
assert.Equal(t, session, l.session)
})
t.Run("FailsToRefresh", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, errors.New("error")).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
oldSession := &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
l.session = oldSession
err = l.refreshSession(ctx)
assert.NotNil(t, err)
assert.Equal(t, oldSession, l.session)
})
}
func TestListener_deleteLastMessage(t *testing.T) {
t.Parallel()
t.Run("SuccessfullyDeletes", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
return lastMessageID.(int64) == int64(5)
})).Return(nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
l.lastMessageID = 5
err = l.deleteLastMessage(ctx)
assert.Nil(t, err)
})
t.Run("FailsToDelete", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("error")).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{}
l.lastMessageID = 5
err = l.deleteLastMessage(ctx)
assert.NotNil(t, err)
})
t.Run("RefreshAndSucceeds", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
newUUID := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &newUUID,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Once()
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
return lastMessageID.(int64) == int64(5)
})).Return(nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
l.lastMessageID = 5
config.Client = client
err = l.deleteLastMessage(ctx)
assert.NoError(t, err)
})
t.Run("RefreshAndFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
newUUID := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &newUUID,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Twice()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
l.lastMessageID = 5
config.Client = client
err = l.deleteLastMessage(ctx)
assert.Error(t, err)
})
}
func TestListener_Listen(t *testing.T) {
t.Parallel()
t.Run("CreateSessionFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
err = l.Listen(ctx, nil)
assert.NotNil(t, err)
})
t.Run("CallHandleRegardlessOfInitialMessage", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
var called bool
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
Return(0, nil).
Run(
func(mock.Arguments) {
called = true
cancel()
},
).
Once()
err = l.Listen(ctx, handler)
assert.True(t, errors.Is(err, context.Canceled))
assert.True(t, called)
})
t.Run("CancelContextAfterGetMessage", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
MaxRunners: 10,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).
Return(msg, nil).
Run(
func(mock.Arguments) {
cancel()
},
).
Once()
// Ensure delete message is called without cancel
client.On("DeleteMessage", context.WithoutCancel(ctx), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
config.Client = client
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
Return(0, nil).
Once()
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
Return(0, nil).
Once()
l, err := New(config)
require.Nil(t, err)
err = l.Listen(ctx, handler)
assert.ErrorIs(t, err, context.Canceled)
})
}
func TestListener_acquireAvailableJobs(t *testing.T) {
t.Parallel()
t.Run("FailingToAcquireJobs", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
uuid := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
_, err = l.acquireAvailableJobs(ctx, availableJobs)
assert.Error(t, err)
})
t.Run("SuccessfullyAcquiresJobsOnFirstRun", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
jobIDs := []int64{1, 2, 3}
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(jobIDs, nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
uuid := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, availableJobs)
assert.NoError(t, err)
assert.Equal(t, []int64{1, 2, 3}, acquiredJobIDs)
})
t.Run("RefreshAndSucceeds", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
// Job IDs we expect AcquireJobs to return once the session is refreshed
want := []int64{1, 2, 3}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
// First call to AcquireJobs will fail with a token expired error
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
ids := args.Get(3).([]int64)
assert.Equal(t, want, ids)
}).
Return(nil, &actions.MessageQueueTokenExpiredError{}).
Once()
// Second call should succeed
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
ids := args.Get(3).([]int64)
assert.Equal(t, want, ids)
}).
Return(want, nil).
Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
got, err := l.acquireAvailableJobs(ctx, availableJobs)
assert.Nil(t, err)
assert.Equal(t, want, got)
})
t.Run("RefreshAndFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
config.Client = client
l, err := New(config)
require.Nil(t, err)
l.session = &actions.RunnerScaleSetSession{
SessionId: &uuid,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
},
},
}
got, err := l.acquireAvailableJobs(ctx, availableJobs)
assert.NotNil(t, err)
assert.Nil(t, got)
})
}
func TestListener_parseMessage(t *testing.T) {
t.Run("FailOnEmptyStatistics", func(t *testing.T) {
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: nil,
}
l := &Listener{}
parsedMsg, err := l.parseMessage(context.Background(), msg)
assert.Error(t, err)
assert.Nil(t, parsedMsg)
})
t.Run("FailOnIncorrectMessageType", func(t *testing.T) {
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerMessages", // arbitrary message type
Statistics: &actions.RunnerScaleSetStatistic{},
}
l := &Listener{}
parsedMsg, err := l.parseMessage(context.Background(), msg)
assert.Error(t, err)
assert.Nil(t, parsedMsg)
})
t.Run("ParseAll", func(t *testing.T) {
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Body: "",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
},
}
var batchedMessages []any
jobsAvailable := []*actions.JobAvailable{
{
AcquireJobUrl: "https://github.com/example",
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable,
},
RunnerRequestId: 1,
},
},
{
AcquireJobUrl: "https://github.com/example",
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable,
},
RunnerRequestId: 2,
},
},
}
for _, msg := range jobsAvailable {
batchedMessages = append(batchedMessages, msg)
}
jobsAssigned := []*actions.JobAssigned{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned,
},
RunnerRequestId: 3,
},
},
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned,
},
RunnerRequestId: 4,
},
},
}
for _, msg := range jobsAssigned {
batchedMessages = append(batchedMessages, msg)
}
jobsStarted := []*actions.JobStarted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted,
},
RunnerRequestId: 5,
},
RunnerId: 2,
RunnerName: "runner2",
},
}
for _, msg := range jobsStarted {
batchedMessages = append(batchedMessages, msg)
}
jobsCompleted := []*actions.JobCompleted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 6,
},
Result: "success",
RunnerId: 1,
RunnerName: "runner1",
},
}
for _, msg := range jobsCompleted {
batchedMessages = append(batchedMessages, msg)
}
b, err := json.Marshal(batchedMessages)
require.NoError(t, err)
msg.Body = string(b)
l := &Listener{}
parsedMsg, err := l.parseMessage(context.Background(), msg)
require.NoError(t, err)
assert.Equal(t, msg.Statistics, parsedMsg.statistics)
assert.Equal(t, jobsAvailable, parsedMsg.jobsAvailable)
assert.Equal(t, jobsStarted, parsedMsg.jobsStarted)
assert.Equal(t, jobsCompleted, parsedMsg.jobsCompleted)
})
}
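For reference outside the ParseAll subtest above, the body that parseMessage consumes is a single JSON array mixing the job message types. The sketch below is illustrative only (it is not part of the test file) and uses only the types and constants already exercised above:
// Illustrative: builds a two-element batch mirroring the ParseAll construction above.
func exampleBatchBody() (string, error) {
	batch := []any{
		&actions.JobAvailable{
			AcquireJobUrl: "https://github.com/example",
			JobMessageBase: actions.JobMessageBase{
				JobMessageType:  actions.JobMessageType{MessageType: messageTypeJobAvailable},
				RunnerRequestId: 1,
			},
		},
		&actions.JobCompleted{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType:  actions.JobMessageType{MessageType: messageTypeJobCompleted},
				RunnerRequestId: 2,
			},
			Result:     "success",
			RunnerId:   1,
			RunnerName: "runner1",
		},
	}
	// Marshal the mixed batch into the string body carried by RunnerScaleSetMessage.Body.
	b, err := json.Marshal(batch)
	return string(b), err
}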


@@ -0,0 +1,205 @@
package listener
import (
"context"
"encoding/json"
"testing"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestInitialMetrics(t *testing.T) {
t.Parallel()
t.Run("SetStaticMetrics", func(t *testing.T) {
t.Parallel()
metrics := metricsmocks.NewPublisher(t)
minRunners := 5
maxRunners := 10
metrics.On("PublishStatic", minRunners, maxRunners).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
MinRunners: minRunners,
MaxRunners: maxRunners,
}
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
})
t.Run("InitialMessageStatistics", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
sessionStatistics := &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
}
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: sessionStatistics,
}
metrics := metricsmocks.NewPublisher(t)
metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
metrics.On("PublishStatistics", sessionStatistics).Once()
metrics.On("PublishDesiredRunners", sessionStatistics.TotalAssignedJobs).
Run(
func(mock.Arguments) {
cancel()
},
).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
config.Client = client
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs, 0).
Return(sessionStatistics.TotalAssignedJobs, nil).
Once()
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
assert.ErrorIs(t, l.Listen(ctx, handler), context.Canceled)
})
}
func TestHandleMessageMetrics(t *testing.T) {
t.Parallel()
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Body: "",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
},
}
var batchedMessages []any
jobsStarted := []*actions.JobStarted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted,
},
RunnerRequestId: 8,
},
RunnerId: 3,
RunnerName: "runner3",
},
}
for _, msg := range jobsStarted {
batchedMessages = append(batchedMessages, msg)
}
jobsCompleted := []*actions.JobCompleted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 6,
},
Result: "success",
RunnerId: 1,
RunnerName: "runner1",
},
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 7,
},
Result: "success",
RunnerId: 2,
RunnerName: "runner2",
},
}
for _, msg := range jobsCompleted {
batchedMessages = append(batchedMessages, msg)
}
b, err := json.Marshal(batchedMessages)
require.NoError(t, err)
msg.Body = string(b)
desiredResult := 4
metrics := metricsmocks.NewPublisher(t)
metrics.On("PublishStatic", 0, 0).Once()
metrics.On("PublishStatistics", msg.Statistics).Once()
metrics.On("PublishJobCompleted", jobsCompleted[0]).Once()
metrics.On("PublishJobCompleted", jobsCompleted[1]).Once()
metrics.On("PublishJobStarted", jobsStarted[0]).Once()
metrics.On("PublishDesiredRunners", desiredResult).Once()
handler := listenermocks.NewHandler(t)
handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 2).Return(desiredResult, nil).Once()
client := listenermocks.NewClient(t)
client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
}
l, err := New(config)
require.NoError(t, err)
l.client = client
l.session = &actions.RunnerScaleSetSession{
OwnerName: "",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "",
MessageQueueAccessToken: "",
Statistics: &actions.RunnerScaleSetStatistic{},
}
err = l.handleMessage(context.Background(), handler, msg)
require.NoError(t, err)
}


@@ -0,0 +1,190 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
uuid "github.com/google/uuid"
)
// Client is an autogenerated mock type for the Client type
type Client struct {
mock.Mock
}
// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
var r0 []int64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) ([]int64, error)); ok {
return rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok {
r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok {
r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, owner)
var r0 *actions.RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string) (*actions.RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, owner)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string) *actions.RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, owner)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
r1 = rf(ctx, runnerScaleSetId, owner)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
ret := _m.Called(ctx, runnerScaleSetId)
var r0 *actions.AcquirableJobList
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int) (*actions.AcquirableJobList, error)); ok {
return rf(ctx, runnerScaleSetId)
}
if rf, ok := ret.Get(0).(func(context.Context, int) *actions.AcquirableJobList); ok {
r0 = rf(ctx, runnerScaleSetId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.AcquirableJobList)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
r1 = rf(ctx, runnerScaleSetId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
var r0 *actions.RunnerScaleSetMessage
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) (*actions.RunnerScaleSetMessage, error)); ok {
return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
}
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) *actions.RunnerScaleSetMessage); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetMessage)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string, string, int64, int) error); ok {
r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 *actions.RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) (*actions.RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, sessionId)
}
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *actions.RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok {
r1 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewClient(t interface {
mock.TestingT
Cleanup(func())
}) *Client {
mock := &Client{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,68 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// Handler is an autogenerated mock type for the Handler type
type Handler struct {
mock.Mock
}
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
ret := _m.Called(ctx, count, jobsCompleted)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
return rf(ctx, count, jobsCompleted)
}
if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
r0 = rf(ctx, count, jobsCompleted)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
r1 = rf(ctx, count, jobsCompleted)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ret := _m.Called(ctx, jobInfo)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
r0 = rf(ctx, jobInfo)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewHandler(t interface {
mock.TestingT
Cleanup(func())
}) *Handler {
mock := &Handler{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

cmd/ghalistener/main.go

@@ -0,0 +1,40 @@
package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
)
func main() {
configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
if !ok {
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
os.Exit(1)
}
config, err := config.Read(configPath)
if err != nil {
log.Printf("Failed to read config: %v", err)
os.Exit(1)
}
app, err := app.New(config)
if err != nil {
log.Printf("Failed to initialize app: %v", err)
os.Exit(1)
}
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
if err := app.Run(ctx); err != nil {
log.Printf("Application returned an error: %v", err)
os.Exit(1)
}
}


@@ -0,0 +1,392 @@
package metrics
import (
"context"
"net/http"
"strconv"
"time"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
labelKeyRunnerScaleSetName = "name"
labelKeyRunnerScaleSetNamespace = "namespace"
labelKeyEnterprise = "enterprise"
labelKeyOrganization = "organization"
labelKeyRepository = "repository"
labelKeyJobName = "job_name"
labelKeyJobWorkflowRef = "job_workflow_ref"
labelKeyEventName = "event_name"
labelKeyJobResult = "job_result"
labelKeyRunnerID = "runner_id"
labelKeyRunnerName = "runner_name"
)
const githubScaleSetSubsystem = "gha"
// labels
var (
scaleSetLabels = []string{
labelKeyRunnerScaleSetName,
labelKeyRepository,
labelKeyOrganization,
labelKeyEnterprise,
labelKeyRunnerScaleSetNamespace,
}
jobLabels = []string{
labelKeyRepository,
labelKeyOrganization,
labelKeyEnterprise,
labelKeyJobName,
labelKeyJobWorkflowRef,
labelKeyEventName,
}
completedJobsTotalLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
startedJobsTotalLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
jobStartupDurationLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
)
var (
assignedJobs = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "assigned_jobs",
Help: "Number of jobs assigned to this scale set.",
},
scaleSetLabels,
)
runningJobs = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "running_jobs",
Help: "Number of jobs running (or about to be run).",
},
scaleSetLabels,
)
registeredRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "registered_runners",
Help: "Number of runners registered by the scale set.",
},
scaleSetLabels,
)
busyRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "busy_runners",
Help: "Number of registered runners running a job.",
},
scaleSetLabels,
)
minRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "min_runners",
Help: "Minimum number of runners.",
},
scaleSetLabels,
)
maxRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "max_runners",
Help: "Maximum number of runners.",
},
scaleSetLabels,
)
desiredRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "desired_runners",
Help: "Number of runners desired by the scale set.",
},
scaleSetLabels,
)
idleRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "idle_runners",
Help: "Number of registered runners not running a job.",
},
scaleSetLabels,
)
startedJobsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: githubScaleSetSubsystem,
Name: "started_jobs_total",
Help: "Total number of jobs started.",
},
startedJobsTotalLabels,
)
completedJobsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "completed_jobs_total",
Help: "Total number of jobs completed.",
Subsystem: githubScaleSetSubsystem,
},
completedJobsTotalLabels,
)
jobStartupDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: githubScaleSetSubsystem,
Name: "job_startup_duration_seconds",
Help: "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobStartupDurationLabels,
)
jobExecutionDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: githubScaleSetSubsystem,
Name: "job_execution_duration_seconds",
Help: "Time spent executing workflow jobs by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobExecutionDurationLabels,
)
)
var runtimeBuckets []float64 = []float64{
0.01,
0.05,
0.1,
0.5,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
12,
15,
18,
20,
25,
30,
40,
50,
60,
70,
80,
90,
100,
110,
120,
150,
180,
210,
240,
300,
360,
420,
480,
540,
600,
900,
1200,
1800,
2400,
3000,
3600,
}
type baseLabels struct {
scaleSetName string
scaleSetNamespace string
enterprise string
organization string
repository string
}
func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
return prometheus.Labels{
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName,
labelKeyJobName: jobBase.JobDisplayName,
labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
labelKeyEventName: jobBase.EventName,
}
}
func (b *baseLabels) scaleSetLabels() prometheus.Labels {
return prometheus.Labels{
labelKeyRunnerScaleSetName: b.scaleSetName,
labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: b.organization,
labelKeyRepository: b.repository,
}
}
func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyJobResult] = msg.Result
l[labelKeyRunnerName] = msg.RunnerName
return l
}
func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyRunnerName] = msg.RunnerName
return l
}
//go:generate mockery --name Publisher --output ./mocks --outpkg mocks --case underscore
type Publisher interface {
PublishStatic(min, max int)
PublishStatistics(stats *actions.RunnerScaleSetStatistic)
PublishJobStarted(msg *actions.JobStarted)
PublishJobCompleted(msg *actions.JobCompleted)
PublishDesiredRunners(count int)
}
//go:generate mockery --name ServerPublisher --output ./mocks --outpkg mocks --case underscore
type ServerPublisher interface {
Publisher
ListenAndServe(ctx context.Context) error
}
var (
_ Publisher = &discard{}
_ ServerPublisher = &exporter{}
)
var Discard Publisher = &discard{}
type exporter struct {
logger logr.Logger
baseLabels
srv *http.Server
}
type ExporterConfig struct {
ScaleSetName string
ScaleSetNamespace string
Enterprise string
Organization string
Repository string
ServerAddr string
ServerEndpoint string
Logger logr.Logger
}
func NewExporter(config ExporterConfig) ServerPublisher {
reg := prometheus.NewRegistry()
reg.MustRegister(
assignedJobs,
runningJobs,
registeredRunners,
busyRunners,
minRunners,
maxRunners,
desiredRunners,
idleRunners,
startedJobsTotal,
completedJobsTotal,
jobStartupDurationSeconds,
jobExecutionDurationSeconds,
)
mux := http.NewServeMux()
mux.Handle(
config.ServerEndpoint,
promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
)
return &exporter{
logger: config.Logger.WithName("metrics"),
baseLabels: baseLabels{
scaleSetName: config.ScaleSetName,
scaleSetNamespace: config.ScaleSetNamespace,
enterprise: config.Enterprise,
organization: config.Organization,
repository: config.Repository,
},
srv: &http.Server{
Addr: config.ServerAddr,
Handler: mux,
},
}
}
func (e *exporter) ListenAndServe(ctx context.Context) error {
e.logger.Info("starting metrics server", "addr", e.srv.Addr)
go func() {
<-ctx.Done()
e.logger.Info("stopping metrics server", "err", ctx.Err())
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
e.srv.Shutdown(ctx)
}()
return e.srv.ListenAndServe()
}
func (e *exporter) PublishStatic(min, max int) {
l := e.scaleSetLabels()
maxRunners.With(l).Set(float64(max))
minRunners.With(l).Set(float64(min))
}
func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
l := e.scaleSetLabels()
assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
}
func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
l := e.startedJobLabels(msg)
startedJobsTotal.With(l).Inc()
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}
func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
l := e.completedJobLabels(msg)
completedJobsTotal.With(l).Inc()
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
}
func (e *exporter) PublishDesiredRunners(count int) {
desiredRunners.With(e.scaleSetLabels()).Set(float64(count))
}
type discard struct{}
func (*discard) PublishStatic(int, int) {}
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic) {}
func (*discard) PublishJobStarted(*actions.JobStarted) {}
func (*discard) PublishJobCompleted(*actions.JobCompleted) {}
func (*discard) PublishDesiredRunners(int) {}
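As a usage illustration (not part of this change), the exporter above could be wired up roughly as follows; the scale set name, namespace, organization, address, and endpoint values are placeholders:
// Illustrative wiring of the metrics exporter; all ExporterConfig values shown are example values.
func startMetrics(ctx context.Context, logger logr.Logger) error {
	exporter := NewExporter(ExporterConfig{
		ScaleSetName:      "my-scale-set",
		ScaleSetNamespace: "arc-runners",
		Organization:      "my-org",
		ServerAddr:        ":8080",
		ServerEndpoint:    "/metrics",
		Logger:            logger,
	})
	// Publish the static gauges once, then serve /metrics until the context is cancelled.
	exporter.PublishStatic(0, 10)
	return exporter.ListenAndServe(ctx)
}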


@@ -0,0 +1,53 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// Publisher is an autogenerated mock type for the Publisher type
type Publisher struct {
mock.Mock
}
// PublishDesiredRunners provides a mock function with given fields: count
func (_m *Publisher) PublishDesiredRunners(count int) {
_m.Called(count)
}
// PublishJobCompleted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobCompleted(msg *actions.JobCompleted) {
_m.Called(msg)
}
// PublishJobStarted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobStarted(msg *actions.JobStarted) {
_m.Called(msg)
}
// PublishStatic provides a mock function with given fields: min, max
func (_m *Publisher) PublishStatic(min int, max int) {
_m.Called(min, max)
}
// PublishStatistics provides a mock function with given fields: stats
func (_m *Publisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
_m.Called(stats)
}
// NewPublisher creates a new instance of Publisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewPublisher(t interface {
mock.TestingT
Cleanup(func())
}) *Publisher {
mock := &Publisher{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,69 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// ServerPublisher is an autogenerated mock type for the ServerPublisher type
type ServerPublisher struct {
mock.Mock
}
// ListenAndServe provides a mock function with given fields: ctx
func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// PublishDesiredRunners provides a mock function with given fields: count
func (_m *ServerPublisher) PublishDesiredRunners(count int) {
_m.Called(count)
}
// PublishJobCompleted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobCompleted(msg *actions.JobCompleted) {
_m.Called(msg)
}
// PublishJobStarted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobStarted(msg *actions.JobStarted) {
_m.Called(msg)
}
// PublishStatic provides a mock function with given fields: min, max
func (_m *ServerPublisher) PublishStatic(min int, max int) {
_m.Called(min, max)
}
// PublishStatistics provides a mock function with given fields: stats
func (_m *ServerPublisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
_m.Called(stats)
}
// NewServerPublisher creates a new instance of ServerPublisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewServerPublisher(t interface {
mock.TestingT
Cleanup(func())
}) *ServerPublisher {
mock := &ServerPublisher{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -0,0 +1,257 @@
package worker
import (
"context"
"encoding/json"
"fmt"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const workerName = "kubernetesworker"
type Option func(*Worker)
func WithLogger(logger logr.Logger) Option {
return func(w *Worker) {
logger = logger.WithName(workerName)
w.logger = &logger
}
}
type Config struct {
EphemeralRunnerSetNamespace string
EphemeralRunnerSetName string
MaxRunners int
MinRunners int
}
// The Worker's role is to process the messages it receives from the listener.
// It then initiates Kubernetes API requests to carry out the necessary actions.
type Worker struct {
clientset *kubernetes.Clientset
config Config
lastPatch int
patchSeq int
logger *logr.Logger
}
var _ listener.Handler = (*Worker)(nil)
func New(config Config, options ...Option) (*Worker, error) {
w := &Worker{
config: config,
lastPatch: -1,
patchSeq: -1,
}
conf, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(conf)
if err != nil {
return nil, err
}
w.clientset = clientset
for _, option := range options {
option(w)
}
if err := w.applyDefaults(); err != nil {
return nil, err
}
return w, nil
}
func (w *Worker) applyDefaults() error {
if w.logger == nil {
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatJSON)
if err != nil {
return fmt.Errorf("NewLogger failed: %w", err)
}
logger = logger.WithName(workerName)
w.logger = &logger
}
return nil
}
// HandleJobStarted updates the job information for the ephemeral runner when a job is started.
// It takes a context and a jobInfo parameter which contains the details of the started job.
// This update marks the ephemeral runner so that the controller has enough context
// to avoid deleting that runner when scaling down.
// It returns an error if there is any issue with updating the job information.
func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
w.logger.Info("Updating job info for the runner",
"runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName,
"repoName", jobInfo.RepositoryName,
"workflowRef", jobInfo.JobWorkflowRef,
"workflowRunId", jobInfo.WorkflowRunId,
"jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestId)
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
if err != nil {
return fmt.Errorf("failed to marshal empty ephemeral runner: %w", err)
}
patch, err := json.Marshal(
&v1alpha1.EphemeralRunner{
Status: v1alpha1.EphemeralRunnerStatus{
JobRequestId: jobInfo.RunnerRequestId,
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
WorkflowRunId: jobInfo.WorkflowRunId,
JobWorkflowRef: jobInfo.JobWorkflowRef,
JobDisplayName: jobInfo.JobDisplayName,
},
},
)
if err != nil {
return fmt.Errorf("failed to marshal ephemeral runner patch: %w", err)
}
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
if err != nil {
return fmt.Errorf("failed to create merge patch json for ephemeral runner: %w", err)
}
w.logger.Info("Updating ephemeral runner with merge patch", "json", string(mergePatch))
patchedStatus := &v1alpha1.EphemeralRunner{}
err = w.clientset.RESTClient().
Patch(types.MergePatchType).
Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
Namespace(w.config.EphemeralRunnerSetNamespace).
Resource("EphemeralRunners").
Name(jobInfo.RunnerName).
SubResource("status").
Body(mergePatch).
Do(ctx).
Into(patchedStatus)
if err != nil {
if kerrors.IsNotFound(err) {
w.logger.Info("Ephemeral runner not found, skipping patching of ephemeral runner status", "runnerName", jobInfo.RunnerName)
return nil
}
return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err)
}
w.logger.Info("Ephemeral runner status updated with the merge patch successfully.")
return nil
}
// HandleDesiredRunnerCount handles the desired runner count by scaling the ephemeral runner set.
// The function calculates the target runner count based on the minimum and maximum runner count configuration.
// If the target runner count is the same as the last patched count, it skips patching and returns nil.
// Otherwise, it creates a merge patch JSON for updating the ephemeral runner set with the desired count.
// The function then scales the ephemeral runner set by applying the merge patch.
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
patchID := w.setDesiredWorkerState(count, jobsCompleted)
original, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: -1,
PatchID: -1,
},
},
)
if err != nil {
return 0, fmt.Errorf("failed to marshal empty ephemeral runner set: %w", err)
}
patch, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: w.lastPatch,
PatchID: patchID,
},
},
)
if err != nil {
w.logger.Error(err, "could not marshal patch ephemeral runner set")
return 0, err
}
w.logger.Info("Compare", "original", string(original), "patch", string(patch))
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
if err != nil {
return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
}
w.logger.Info("Preparing EphemeralRunnerSet update", "json", string(mergePatch))
patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
err = w.clientset.RESTClient().
Patch(types.MergePatchType).
Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
Namespace(w.config.EphemeralRunnerSetNamespace).
Resource("ephemeralrunnersets").
Name(w.config.EphemeralRunnerSetName).
Body([]byte(mergePatch)).
Do(ctx).
Into(patchedEphemeralRunnerSet)
if err != nil {
return 0, fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err)
}
w.logger.Info("Ephemeral runner set scaled.",
"namespace", w.config.EphemeralRunnerSetNamespace,
"name", w.config.EphemeralRunnerSetName,
"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
)
return w.lastPatch, nil
}
// setDesiredWorkerState calculates the desired state of the worker based on the desired count and the number of jobs completed.
func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
w.patchSeq++
desiredPatchID := w.patchSeq
if count == 0 && jobsCompleted == 0 { // empty batch
targetRunnerCount = max(w.lastPatch, targetRunnerCount)
if targetRunnerCount == w.config.MinRunners {
// We have an empty batch, and the last patch was the min runners.
// Since this is an empty batch, and we are at the min runners, they should all be idle.
// If the controller accidentally created a few extra pods (during scale-down events),
// this allows it to scale back down to the min runners.
// However, it is important to keep the patch sequence increasing so we don't ignore a batch.
desiredPatchID = 0
}
}
w.lastPatch = targetRunnerCount
w.logger.Info(
"Calculated target runner count",
"assigned job", count,
"decision", targetRunnerCount,
"min", w.config.MinRunners,
"max", w.config.MaxRunners,
"currentRunnerCount", w.lastPatch,
"jobsCompleted", jobsCompleted,
)
return desiredPatchID
}
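To make the clamping rule above concrete, here is a small illustrative sketch (not part of the package) that reproduces only the target-count formula from setDesiredWorkerState, ignoring patch sequencing; the helper name and the sample values are assumptions for illustration:
// Illustrative sketch of the scaling decision:
// target = min(MinRunners + assigned, MaxRunners), with an empty batch
// (assigned == 0 && completed == 0) re-using the previous target.
func exampleTargetCount(minRunners, maxRunners, lastPatch, assigned, completed int) int {
	target := min(minRunners+assigned, maxRunners)
	if assigned == 0 && completed == 0 {
		target = max(lastPatch, target)
	}
	return target
}
// With MinRunners=1, MaxRunners=3 (matching the MinMaxSet tests below):
//   exampleTargetCount(1, 3, -1, 2, 0) == 3  // 1+2 is within max
//   exampleTargetCount(1, 3, 3, 4, 0)  == 3  // clamped to MaxRunners
//   exampleTargetCount(1, 3, 3, 0, 0)  == 3  // empty batch keeps the last value
//   exampleTargetCount(1, 3, 3, 0, 3)  == 1  // jobs done, back to MinRunners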


@@ -0,0 +1,326 @@
package worker
import (
"math"
"testing"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
)
func TestSetDesiredWorkerState_MinMaxDefaults(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 0,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("init calculate with acquired 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("init calculate with acquired 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("increment patch when job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("increment patch when called with same parameters", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(1, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("calculate desired scale when acquired > 0 and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the last state when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("adjust when acquired == 0 and completed == 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 1,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("desired patch is 0 but sequence continues on empty batch and min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 4, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 0,
MaxRunners: 5,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 2, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
w.setDesiredWorkerState(5, 0)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count > max and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(6, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale back to 0 when count was > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinMaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 1,
MaxRunners: 3,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale to min when count == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(4, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 2, w.patchSeq)
})
}


@@ -129,7 +129,7 @@ func (m *AutoScalerClient) Close() error {
return m.client.Close()
}
-func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error {
+func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
if m.initialMessage != nil {
err := handler(m.initialMessage)
if err != nil {
@@ -141,7 +141,7 @@ func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler
}
for {
-message, err := m.client.GetMessage(ctx, m.lastMessageId)
+message, err := m.client.GetMessage(ctx, m.lastMessageId, maxCapacity)
if err != nil {
return fmt.Errorf("get message failed from refreshing client. %w", err)
}


@@ -317,7 +317,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
 		MessageType: "test",
 		Body: "test",
@@ -332,7 +332,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting message")
 	assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message")
@@ -340,7 +340,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting message")
 	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
@@ -368,7 +368,7 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
 		MessageType: "test",
 		Body: "test",
@@ -383,14 +383,14 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting message")
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return fmt.Errorf("error")
-	})
+	}, 10)
 	assert.ErrorContains(t, err, "handle message failed. error", "Error getting message")
 	assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
@@ -419,7 +419,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) {
 			TotalAssignedJobs: 2,
 		},
 	}
-	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
+	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything, mock.Anything).Return(session, nil)
 	mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
 		Count: 1,
 		Jobs: []actions.AcquirableJob{
@@ -439,7 +439,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting message")
 	assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
@@ -488,7 +488,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessageFailed(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return fmt.Errorf("error")
-	})
+	}, 10)
 	assert.ErrorContains(t, err, "fail to process initial message. error", "Error getting message")
 	assert.NotNil(t, asClient.initialMessage, "Initial message should be nil")
@@ -516,8 +516,8 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, nil).Times(3)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, nil).Times(3)
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
 		MessageType: "test",
 		Body: "test",
@@ -532,13 +532,13 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting initial message")
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting message")
 	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
@@ -565,7 +565,7 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, fmt.Errorf("error"))
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, fmt.Errorf("error"))
 	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
 		asc.client = mockSessionClient
@@ -575,12 +575,12 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
 	// process initial message
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting initial message")
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		return fmt.Errorf("Should not be called")
-	})
+	}, 10)
 	assert.ErrorContains(t, err, "get message failed from refreshing client. error", "Error should be returned")
 	assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated")
@@ -608,7 +608,7 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
 		Statistics: &actions.RunnerScaleSetStatistic{},
 	}
 	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
-	mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
 		MessageType: "test",
 		Body: "test",
@@ -623,13 +623,13 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.NoError(t, err, "Error getting initial message")
 	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
 		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
 		return nil
-	})
+	}, 10)
 	assert.ErrorContains(t, err, "delete message failed from refreshing client. error", "Error getting message")
 	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")

View File

@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"math"
 	"strings"
 	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
@@ -90,7 +89,7 @@ func (s *Service) Start() error {
 			s.logger.Info("service is stopped.")
 			return nil
 		default:
-			err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage)
+			err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage, s.settings.MaxRunners)
 			if err != nil {
 				return fmt.Errorf("could not get and process message. %w", err)
 			}
@@ -206,7 +205,9 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
 }
 func (s *Service) scaleForAssignedJobCount(count int) error {
-	targetRunnerCount := int(math.Max(math.Min(float64(s.settings.MaxRunners), float64(count)), float64(s.settings.MinRunners)))
+	// Max runners should always be set by the resource builder either to the configured value,
+	// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
+	targetRunnerCount := min(s.settings.MinRunners+count, s.settings.MaxRunners)
 	s.metricsExporter.publishDesiredRunners(targetRunnerCount)
 	if targetRunnerCount != s.currentRunnerCount {
 		s.logger.Info("try scale runner request up/down base on assigned job count",
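
For comparison, a small self-contained sketch (illustrative only, with made-up numbers) of how the two formulas differ. The old expression clamps the assigned-job count into the [MinRunners, MaxRunners] range; the new one reserves MinRunners as idle headroom on top of the assigned jobs, still capped at MaxRunners, which is what the updated ScaleEphemeralRunnerSet expectations in the tests below reflect. The built-in min/max require Go 1.21+.

package main

import "fmt"

func main() {
    minRunners, maxRunners := 1, 5
    for _, assigned := range []int{0, 2, 3, 10} {
        // previous behaviour: clamp(assigned, minRunners, maxRunners)
        oldTarget := max(min(maxRunners, assigned), minRunners)
        // new behaviour: keep minRunners of idle headroom above the assigned jobs
        newTarget := min(minRunners+assigned, maxRunners)
        fmt.Printf("assigned=%d old=%d new=%d\n", assigned, oldTarget, newTarget)
    }
}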

View File

@@ -64,7 +64,7 @@ func TestStart(t *testing.T) {
 	)
 	require.NoError(t, err)
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(mock.Arguments) { cancel() }).Return(nil).Once()
 	err = service.Start()
@@ -98,7 +98,7 @@ func TestStart_ScaleToMinRunners(t *testing.T) {
 	)
 	require.NoError(t, err)
-	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
+	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
 		_ = service.scaleForAssignedJobCount(5)
 	}).Return(nil)
@@ -137,7 +137,7 @@ func TestStart_ScaleToMinRunnersFailed(t *testing.T) {
 	require.NoError(t, err)
 	c := mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
-	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
+	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
 		_ = service.scaleForAssignedJobCount(5)
 	}).Return(c.ReturnArguments.Get(0))
@@ -172,8 +172,8 @@ func TestStart_GetMultipleMessages(t *testing.T) {
 	)
 	require.NoError(t, err)
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(5)
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(5)
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
 	err = service.Start()
@@ -207,8 +207,8 @@ func TestStart_ErrorOnMessage(t *testing.T) {
 	)
 	require.NoError(t, err)
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(2)
-	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(fmt.Errorf("error")).Once()
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(2)
+	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("error")).Once()
 	err = service.Start()
@@ -397,7 +397,7 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {
 	require.NoError(t, err)
 	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
 	err = service.processMessage(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
@@ -523,9 +523,9 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
 	require.NoError(t, err)
 	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 4).Return(nil).Once()
 	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
 	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
 	err = service.scaleForAssignedJobCount(0)
@@ -569,7 +569,7 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
 	)
 	require.NoError(t, err)
-	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error"))
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(fmt.Errorf("error"))
 	err = service.scaleForAssignedJobCount(2)
@@ -605,8 +605,23 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {
 	service.currentRunnerCount = 1
-	mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
+	mockKubeManager.On(
+		"UpdateEphemeralRunnerWithJobInfo",
+		ctx,
+		service.settings.Namespace,
+		"runner1",
+		"owner1",
+		"repo1",
+		".github/workflows/ci.yaml",
+		"job1",
+		int64(100),
+		int64(3),
+	).Run(
+		func(_ mock.Arguments) { cancel() },
+	).Return(nil).Once()
 	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
+	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil)
 	err = service.processMessage(&actions.RunnerScaleSetMessage{
 		MessageId: 1,

View File

@@ -176,6 +176,8 @@ func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOpti
 		Version:    build.Version,
 		CommitSHA:  build.CommitSHA,
 		ScaleSetID: rc.RunnerScaleSetId,
+		HasProxy:   hasProxy(),
+		Subsystem:  "githubrunnerscalesetlistener",
 	})
 	if err != nil {
 		return fmt.Errorf("failed to create an Actions Service client: %w", err)
@@ -235,3 +237,8 @@ func newActionsClientFromConfig(config config.Config, creds *actions.ActionsAuth
 	return actions.NewClient(config.ConfigureUrl, creds, options...)
 }
+func hasProxy() bool {
+	proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
+	return proxyFunc != nil
+}
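
httpproxy.FromEnvironment (golang.org/x/net/http/httpproxy) builds its configuration from the HTTP_PROXY, HTTPS_PROXY and NO_PROXY environment variables. A minimal sketch of inspecting that configuration directly; this is illustrative only and not part of the listener code.

package main

import (
    "fmt"

    "golang.org/x/net/http/httpproxy"
)

func main() {
    // FromEnvironment reads HTTP_PROXY, HTTPS_PROXY and NO_PROXY once at call time.
    cfg := httpproxy.FromEnvironment()
    fmt.Printf("HTTP_PROXY=%q HTTPS_PROXY=%q NO_PROXY=%q\n", cfg.HTTPProxy, cfg.HTTPSProxy, cfg.NoProxy)
}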

View File

@@ -8,6 +8,6 @@ import (
 //go:generate mockery --inpackage --name=RunnerScaleSetClient
 type RunnerScaleSetClient interface {
-	GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error
+	GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error
 	AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error
 }

View File

@@ -1,4 +1,4 @@
-// Code generated by mockery v2.33.2. DO NOT EDIT.
+// Code generated by mockery v2.36.1. DO NOT EDIT.
 package main

View File

@@ -1,4 +1,4 @@
-// Code generated by mockery v2.33.2. DO NOT EDIT.
+// Code generated by mockery v2.36.1. DO NOT EDIT.
 package main
@@ -29,13 +29,13 @@ func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Con
 	return r0
 }
-// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler
-func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error) error {
-	ret := _m.Called(ctx, handler)
+// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
+func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
+	ret := _m.Called(ctx, handler, maxCapacity)
 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error) error); ok {
-		r0 = rf(ctx, handler)
+	if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error, int) error); ok {
+		r0 = rf(ctx, handler, maxCapacity)
 	} else {
 		r0 = ret.Error(0)
 	}

View File

@@ -24,8 +24,12 @@ func newSessionClient(client actions.ActionsService, logger *logr.Logger, sessio
 	}
 }
-func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) {
-	message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId)
+func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
+	if maxCapacity < 0 {
+		return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
+	}
+	message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity)
 	if err == nil {
 		return message, nil
 	}
@@ -42,7 +46,7 @@ func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId
 	}
 	m.session = session
-	message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId)
+	message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity)
 	if err != nil {
 		return nil, fmt.Errorf("delete message failed after refresh message session. %w", err)
 	}

View File

@@ -31,17 +31,17 @@ func TestGetMessage(t *testing.T) {
 		},
 	}
-	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, nil).Once()
-	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once()
+	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, nil).Once()
+	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once()
 	client := newSessionClient(mockActionsClient, &logger, session)
-	msg, err := client.GetMessage(ctx, 0)
+	msg, err := client.GetMessage(ctx, 0, 10)
 	require.NoError(t, err, "GetMessage should not return an error")
 	assert.Nil(t, msg, "GetMessage should return nil message")
-	msg, err = client.GetMessage(ctx, 0)
+	msg, err = client.GetMessage(ctx, 0, 10)
 	require.NoError(t, err, "GetMessage should not return an error")
 	assert.Equal(t, int64(1), msg.MessageId, "GetMessage should return a message with id 1")
@@ -146,11 +146,11 @@ func TestGetMessage_Error(t *testing.T) {
 		},
 	}
-	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, fmt.Errorf("error")).Once()
+	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, fmt.Errorf("error")).Once()
 	client := newSessionClient(mockActionsClient, &logger, session)
-	msg, err := client.GetMessage(ctx, 0)
+	msg, err := client.GetMessage(ctx, 0, 10)
 	assert.ErrorContains(t, err, "get message failed. error", "GetMessage should return an error")
 	assert.Nil(t, msg, "GetMessage should return nil message")
 	assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
@@ -227,8 +227,8 @@ func TestGetMessage_RefreshToken(t *testing.T) {
 			Id: 1,
 		},
 	}
-	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
-	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0)).Return(&actions.RunnerScaleSetMessage{
+	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
+	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0), 10).Return(&actions.RunnerScaleSetMessage{
 		MessageId: 1,
 		MessageType: "test",
 		Body: "test",
@@ -243,7 +243,7 @@ func TestGetMessage_RefreshToken(t *testing.T) {
 	}, nil).Once()
 	client := newSessionClient(mockActionsClient, &logger, session)
-	msg, err := client.GetMessage(ctx, 0)
+	msg, err := client.GetMessage(ctx, 0, 10)
 	assert.NoError(t, err, "Error getting message")
 	assert.Equal(t, int64(1), msg.MessageId, "message id should be updated")
 	assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated")
@@ -340,11 +340,11 @@ func TestGetMessage_RefreshToken_Failed(t *testing.T) {
 			Id: 1,
 		},
 	}
-	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
+	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
 	mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error"))
 	client := newSessionClient(mockActionsClient, &logger, session)
-	msg, err := client.GetMessage(ctx, 0)
+	msg, err := client.GetMessage(ctx, 0, 10)
 	assert.ErrorContains(t, err, "refresh message session failed. error", "Error should be returned")
 	assert.Nil(t, msg, "Message should be nil")
 	assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated")

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
metadata: metadata:
annotations: annotations:
controller-gen.kubebuilder.io/version: v0.13.0 controller-gen.kubebuilder.io/version: v0.14.0
name: horizontalrunnerautoscalers.actions.summerwind.dev name: horizontalrunnerautoscalers.actions.summerwind.dev
spec: spec:
group: actions.summerwind.dev group: actions.summerwind.dev
@@ -35,10 +35,19 @@ spec:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
properties: properties:
apiVersion: apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string type: string
kind: kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string type: string
metadata: metadata:
type: object type: object
@@ -47,7 +56,9 @@ spec:
properties: properties:
capacityReservations: capacityReservations:
items: items:
description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime. description: |-
CapacityReservation specifies the number of replicas temporarily added
to the scale target until ExpirationTime.
properties: properties:
effectiveTime: effectiveTime:
format: date-time format: date-time
@@ -79,30 +90,46 @@ spec:
items: items:
properties: properties:
repositoryNames: repositoryNames:
description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`. description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items: items:
type: string type: string
type: array type: array
scaleDownAdjustment: scaleDownAdjustment:
description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment. description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer type: integer
scaleDownFactor: scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed. description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string type: string
scaleDownThreshold: scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down. description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string type: string
scaleUpAdjustment: scaleUpAdjustment:
description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment. description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer type: integer
scaleUpFactor: scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added. description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string type: string
scaleUpThreshold: scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up. description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string type: string
type: type:
description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy. description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string type: string
type: object type: object
type: array type: array
@@ -110,7 +137,9 @@ spec:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer type: integer
scaleDownDelaySecondsAfterScaleOut: scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop) description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
type: integer type: integer
scaleTargetRef: scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
@@ -126,7 +155,18 @@ spec:
type: string type: string
type: object type: object
scaleUpTriggers: scaleUpTriggers:
description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available." description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook requested received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items: items:
properties: properties:
amount: amount:
@@ -139,12 +179,18 @@ spec:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties: properties:
names: names:
description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of patterns in the list can trigger autoscaling. Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job. description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items: items:
type: string type: string
type: array type: array
repositories: repositories:
description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of repositories in the list can trigger autoscaling. description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items: items:
type: string type: string
type: array type: array
@@ -169,7 +215,9 @@ spec:
type: array type: array
type: object type: object
push: push:
description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object type: object
workflowJob: workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
@@ -178,23 +226,33 @@ spec:
type: object type: object
type: array type: array
scheduledOverrides: scheduledOverrides:
description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized. description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items: items:
description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year. description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties: properties:
endTime: endTime:
description: EndTime is the time at which the first override ends. description: EndTime is the time at which the first override ends.
format: date-time format: date-time
type: string type: string
minReplicas: minReplicas:
description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas. description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0 minimum: 0
nullable: true nullable: true
type: integer type: integer
recurrenceRule: recurrenceRule:
properties: properties:
frequency: frequency:
description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once. description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
enum: enum:
- Daily - Daily
- Weekly - Weekly
@@ -202,7 +260,9 @@ spec:
- Yearly - Yearly
type: string type: string
untilTime: untilTime:
description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever. description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time format: date-time
type: string type: string
type: object type: object
@@ -231,18 +291,24 @@ spec:
type: object type: object
type: array type: array
desiredReplicas: desiredReplicas:
description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset. description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer type: integer
lastSuccessfulScaleOutTime: lastSuccessfulScaleOutTime:
format: date-time format: date-time
nullable: true nullable: true
type: string type: string
observedGeneration: observedGeneration:
description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server. description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64 format: int64
type: integer type: integer
scheduledOverridesSummary: scheduledOverridesSummary:
description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability. description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
type: string type: string
type: object type: object
type: object type: object

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -41,7 +41,7 @@ import (
 )
 const (
-	autoscalingListenerContainerName = "autoscaler"
+	autoscalingListenerContainerName = "listener"
 	autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
 )
@@ -690,30 +690,6 @@ func (r *AutoscalingListenerReconciler) publishRunningListener(autoscalingListen
 // SetupWithManager sets up the controller with the Manager.
 func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	groupVersionIndexer := func(rawObj client.Object) []string {
-		groupVersion := v1alpha1.GroupVersion.String()
-		owner := metav1.GetControllerOf(rawObj)
-		if owner == nil {
-			return nil
-		}
-		// ...make sure it is owned by this controller
-		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
-			return nil
-		}
-		// ...and if so, return it
-		return []string{owner.Name}
-	}
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
 	labelBasedWatchFunc := func(_ context.Context, obj client.Object) []reconcile.Request {
 		var requests []reconcile.Request
 		labels := obj.GetLabels()

View File

@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
) )
const ( const (
@@ -34,9 +34,9 @@ var _ = Describe("Test AutoScalingListener controller", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener var autoscalingListener *v1alpha1.AutoscalingListener
BeforeEach(func() { BeforeEach(func() {
ctx = context.Background() ctx = context.Background()
@@ -53,12 +53,12 @@ var _ = Describe("Test AutoScalingListener controller", func() {
min := 1 min := 1
max := 10 max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
MaxRunners: &max, MaxRunners: &max,
@@ -79,12 +79,12 @@ var _ = Describe("Test AutoScalingListener controller", func() {
err = k8sClient.Create(ctx, autoscalingRunnerSet) err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{ autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asl", Name: "test-asl",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingListenerSpec{ Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1, RunnerScaleSetId: 1,
@@ -119,7 +119,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
).Should(Succeed(), "Config secret should be created") ).Should(Succeed(), "Config secret should be created")
// Check if finalizer is added // Check if finalizer is added
created := new(actionsv1alpha1.AutoscalingListener) created := new(v1alpha1.AutoscalingListener)
Eventually( Eventually(
func() (string, error) { func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created)
@@ -298,7 +298,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
// The AutoScalingListener should be deleted // The AutoScalingListener should be deleted
Eventually( Eventually(
func() error { func() error {
listenerList := new(actionsv1alpha1.AutoscalingListenerList) listenerList := new(v1alpha1.AutoscalingListenerList)
err := k8sClient.List(ctx, listenerList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{".metadata.name": autoscalingListener.Name}) err := k8sClient.List(ctx, listenerList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{".metadata.name": autoscalingListener.Name})
if err != nil { if err != nil {
return err return err
@@ -415,9 +415,9 @@ var _ = Describe("Test AutoScalingListener customization", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener var autoscalingListener *v1alpha1.AutoscalingListener
var runAsUser int64 = 1001 var runAsUser int64 = 1001
@@ -425,7 +425,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Spec: corev1.PodSpec{ Spec: corev1.PodSpec{
Containers: []corev1.Container{ Containers: []corev1.Container{
{ {
Name: "listener", Name: autoscalingListenerContainerName,
ImagePullPolicy: corev1.PullAlways, ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{ SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser, RunAsUser: &runAsUser,
@@ -458,12 +458,12 @@ var _ = Describe("Test AutoScalingListener customization", func() {
min := 1 min := 1
max := 10 max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
MaxRunners: &max, MaxRunners: &max,
@@ -484,12 +484,12 @@ var _ = Describe("Test AutoScalingListener customization", func() {
err = k8sClient.Create(ctx, autoscalingRunnerSet) err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{ autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asltest", Name: "test-asltest",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingListenerSpec{ Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1, RunnerScaleSetId: 1,
@@ -512,7 +512,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Context("When creating a new AutoScalingListener", func() { Context("When creating a new AutoScalingListener", func() {
It("It should create customized pod with applied configuration", func() { It("It should create customized pod with applied configuration", func() {
// Check if finalizer is added // Check if finalizer is added
created := new(actionsv1alpha1.AutoscalingListener) created := new(v1alpha1.AutoscalingListener)
Eventually( Eventually(
func() (string, error) { func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created)
@@ -555,7 +555,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Expect(pod.Spec.SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context") Expect(pod.Spec.SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].Name).NotTo(Equal("listener"), "Pod should have the correct container name") Expect(pod.Spec.Containers[0].Name).To(Equal(autoscalingListenerContainerName), "Pod should have the correct container name")
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context") Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy") Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
@@ -570,19 +570,19 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener var autoscalingListener *v1alpha1.AutoscalingListener
createRunnerSetAndListener := func(proxy *actionsv1alpha1.ProxyConfig) { createRunnerSetAndListener := func(proxy *v1alpha1.ProxyConfig) {
min := 1 min := 1
max := 10 max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
MaxRunners: &max, MaxRunners: &max,
@@ -604,12 +604,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
err := k8sClient.Create(ctx, autoscalingRunnerSet) err := k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{ autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asl", Name: "test-asl",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingListenerSpec{ Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1, RunnerScaleSetId: 1,
@@ -658,12 +658,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
err := k8sClient.Create(ctx, proxyCredentials) err := k8sClient.Create(ctx, proxyCredentials)
Expect(err).NotTo(HaveOccurred(), "failed to create proxy credentials secret") Expect(err).NotTo(HaveOccurred(), "failed to create proxy credentials secret")
proxy := &actionsv1alpha1.ProxyConfig{ proxy := &v1alpha1.ProxyConfig{
HTTP: &actionsv1alpha1.ProxyServerConfig{ HTTP: &v1alpha1.ProxyServerConfig{
Url: "http://localhost:8080", Url: "http://localhost:8080",
CredentialSecretRef: "proxy-credentials", CredentialSecretRef: "proxy-credentials",
}, },
HTTPS: &actionsv1alpha1.ProxyServerConfig{ HTTPS: &v1alpha1.ProxyServerConfig{
Url: "https://localhost:8443", Url: "https://localhost:8443",
CredentialSecretRef: "proxy-credentials", CredentialSecretRef: "proxy-credentials",
}, },
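For context, the "proxy-credentials" Secret referenced by CredentialSecretRef above is created earlier in the test and is not shown in this hunk. A minimal sketch of such a Secret, assuming the conventional username/password data keys (the key names are an assumption, not taken from this diff), reusing the test's ctx, k8sClient, and namespace:

// Sketch only: a proxy credentials Secret that ProxyServerConfig.CredentialSecretRef could point at.
proxyCredentials := &corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "proxy-credentials",
		Namespace: autoscalingNS.Name,
	},
	StringData: map[string]string{
		"username": "proxy-user",     // assumed key name
		"password": "proxy-password", // assumed key name
	},
}
Expect(k8sClient.Create(ctx, proxyCredentials)).To(Succeed())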
@@ -766,19 +766,19 @@ var _ = Describe("Test AutoScalingListener controller with template modification
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener var autoscalingListener *v1alpha1.AutoscalingListener
createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) { createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) {
min := 1 min := 1
max := 10 max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
MaxRunners: &max, MaxRunners: &max,
@@ -800,12 +800,12 @@ var _ = Describe("Test AutoScalingListener controller with template modification
err := k8sClient.Create(ctx, autoscalingRunnerSet) err := k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{ autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asl", Name: "test-asl",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingListenerSpec{ Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1, RunnerScaleSetId: 1,
@@ -854,7 +854,7 @@ var _ = Describe("Test AutoScalingListener controller with template modification
Spec: corev1.PodSpec{ Spec: corev1.PodSpec{
Containers: []corev1.Container{ Containers: []corev1.Container{
{ {
Name: "listener", Name: autoscalingListenerContainerName,
ImagePullPolicy: corev1.PullAlways, ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{ SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser1001, RunAsUser: &runAsUser1001,
@@ -915,9 +915,9 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener var autoscalingListener *v1alpha1.AutoscalingListener
var rootCAConfigMap *corev1.ConfigMap var rootCAConfigMap *corev1.ConfigMap
BeforeEach(func() { BeforeEach(func() {
@@ -955,16 +955,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
min := 1 min := 1
max := 10 max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs", Name: "test-asrs",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
CertificateFrom: &actionsv1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
Name: rootCAConfigMap.Name, Name: rootCAConfigMap.Name,
@@ -991,16 +991,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
err = k8sClient.Create(ctx, autoscalingRunnerSet) err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{ autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "test-asl", Name: "test-asl",
Namespace: autoscalingNS.Name, Namespace: autoscalingNS.Name,
}, },
Spec: actionsv1alpha1.AutoscalingListenerSpec{ Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
CertificateFrom: &actionsv1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
Name: rootCAConfigMap.Name, Name: rootCAConfigMap.Name,
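The CertificateFrom.ConfigMapKeyRef above points at rootCAConfigMap, whose contents are set up outside this hunk. A rough sketch of a ConfigMap of that shape, assuming a "ca.crt" key and a hypothetical certPEM variable holding PEM-encoded CA bytes:

// Sketch only: a ConfigMap carrying a custom root CA that GitHubServerTLS can reference.
rootCAConfigMap := &corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "root-ca-configmap",
		Namespace: autoscalingNS.Name,
	},
	Data: map[string]string{
		"ca.crt": string(certPEM), // assumed key; certPEM is illustrative only
	},
}
Expect(k8sClient.Create(ctx, rootCAConfigMap)).To(Succeed())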

View File

@@ -30,7 +30,6 @@ import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime" ctrl "sigs.k8s.io/controller-runtime"
@@ -42,10 +41,14 @@ import (
) )
const ( const (
labelKeyRunnerSpecHash = "runner-spec-hash" annotationKeyRunnerSpecHash = "actions.github.com/runner-spec-hash"
// annotationKeyValuesHash is the hash of the entire values JSON.
// It is used to determine whether the values have changed, so we can
// re-create the listener.
annotationKeyValuesHash = "actions.github.com/values-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id" runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
) )
type UpdateStrategy string type UpdateStrategy string
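The controller compares these annotations to decide when the listener or the ephemeral runner set must be recreated. The hash functions themselves (RunnerSetSpecHash, ListenerSpecHash, and the values hash written by the chart) are defined outside this diff; a minimal sketch of the general idea, hashing a JSON-serialized spec with FNV, might look like the following (illustrative only, not the repository's actual implementation; imports: encoding/json, fmt, hash/fnv):

// Sketch only: hash a spec so it can be stored in an annotation and compared later.
func specHash(spec any) string {
	h := fnv.New32a()
	b, _ := json.Marshal(spec) // assumes the spec serializes cleanly to JSON
	h.Write(b)
	return fmt.Sprint(h.Sum32())
}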
@@ -205,7 +208,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
} }
// Make sure the runner scale set name is up to date // Make sure the runner scale set name is up to date
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey] currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) { if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.") log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log) return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
@@ -231,9 +234,8 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
} }
desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
for _, runnerSet := range existingRunnerSets.all() { for _, runnerSet := range existingRunnerSets.all() {
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash]) log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Annotations[annotationKeyRunnerSpecHash])
} }
// Make sure the AutoscalingListener is up and running in the controller namespace // Make sure the AutoscalingListener is up and running in the controller namespace
@@ -250,7 +252,9 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
} }
// Our listener pod is out of date, so we need to delete it to get a new recreate. // Our listener pod is out of date, so we need to delete it to get a new recreate.
if listenerFound && (listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()) { listenerValuesHashChanged := listener.Annotations[annotationKeyValuesHash] != autoscalingRunnerSet.Annotations[annotationKeyValuesHash]
listenerSpecHashChanged := listener.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()
if listenerFound && (listenerValuesHashChanged || listenerSpecHashChanged) {
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name) log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
if err := r.Delete(ctx, listener); err != nil { if err := r.Delete(ctx, listener); err != nil {
if kerrors.IsNotFound(err) { if kerrors.IsNotFound(err) {
@@ -264,7 +268,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] { if latestRunnerSet.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.RunnerSetSpecHash() {
if r.drainingJobs(&latestRunnerSet.Status) { if r.drainingJobs(&latestRunnerSet.Status) {
log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners) log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
log.Info("Scaling down the number of desired replicas to 0") log.Info("Scaling down the number of desired replicas to 0")
@@ -272,6 +276,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
// need to scale down to 0 // need to scale down to 0
err := patch(ctx, r.Client, latestRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { err := patch(ctx, r.Client, latestRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
obj.Spec.Replicas = 0 obj.Spec.Replicas = 0
obj.Spec.PatchID = 0
}) })
if err != nil { if err != nil {
log.Error(err, "Failed to patch runner set to set desired count to 0") log.Error(err, "Failed to patch runner set to set desired count to 0")
@@ -466,6 +471,8 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
Version: build.Version, Version: build.Version,
CommitSHA: build.CommitSHA, CommitSHA: build.CommitSHA,
ScaleSetID: runnerScaleSet.Id, ScaleSetID: runnerScaleSet.Id,
HasProxy: autoscalingRunnerSet.Spec.Proxy != nil,
Subsystem: "controller",
}) })
logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id, "runnerGroupName", runnerScaleSet.RunnerGroupName) logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id, "runnerGroupName", runnerScaleSet.RunnerGroupName)
@@ -478,7 +485,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels") logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")
if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id) obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
@@ -526,9 +533,10 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err return ctrl.Result{}, err
} }
logger.Info("Updating runner scale set runner group name as an annotation") logger.Info("Updating runner scale set name and runner group name as annotations")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName
obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
}); err != nil { }); err != nil {
logger.Error(err, "Failed to update runner group name annotation") logger.Error(err, "Failed to update runner group name annotation")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -564,7 +572,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
logger.Info("Updating runner scale set name as an annotation") logger.Info("Updating runner scale set name as an annotation")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
}); err != nil { }); err != nil {
logger.Error(err, "Failed to update runner scale set name annotation") logger.Error(err, "Failed to update runner scale set name annotation")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -750,26 +758,6 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Con
// SetupWithManager sets up the controller with the Manager. // SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
groupVersionIndexer := func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOf(rawObj)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingRunnerSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr). return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingRunnerSet{}). For(&v1alpha1.AutoscalingRunnerSet{}).
Owns(&v1alpha1.EphemeralRunnerSet{}). Owns(&v1alpha1.EphemeralRunnerSet{}).

View File

@@ -280,6 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// This should trigger re-creation of EphemeralRunnerSet and Listener // This should trigger re-creation of EphemeralRunnerSet and Listener
patched := autoscalingRunnerSet.DeepCopy() patched := autoscalingRunnerSet.DeepCopy()
patched.Spec.Template.Spec.PriorityClassName = "test-priority-class" patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy() autoscalingRunnerSet = patched.DeepCopy()
@@ -297,10 +301,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items)) return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
} }
return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil return runnerSetList.Items[0].Annotations[annotationKeyRunnerSpecHash], nil
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Annotations[annotationKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
// We should create a new listener // We should create a new listener
Eventually( Eventually(
@@ -334,6 +338,55 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
// We should not re-create a new EphemeralRunnerSet
Consistently(
func() (string, error) {
runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
if err != nil {
return "", err
}
if len(runnerSetList.Items) != 1 {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
}
return string(runnerSetList.Items[0].UID), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created")
// We should only re-create a new listener
Eventually(
func() (string, error) {
listener := new(v1alpha1.AutoscalingListener)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
if err != nil {
return "", err
}
return string(listener.UID), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created")
// Only update the values hash for the autoscaling runner set
// This should trigger re-creation of the Listener only
runnerSetList = new(v1alpha1.EphemeralRunnerSetList)
err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet")
runnerSet = runnerSetList.Items[0]
listener = new(v1alpha1.AutoscalingListener)
err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
patched = autoscalingRunnerSet.DeepCopy()
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
// We should not re-create a new EphemeralRunnerSet // We should not re-create a new EphemeralRunnerSet
Consistently( Consistently(
func() (string, error) { func() (string, error) {
@@ -493,6 +546,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// Patch the AutoScalingRunnerSet image which should trigger // Patch the AutoScalingRunnerSet image which should trigger
// the recreation of the Listener and EphemeralRunnerSet // the recreation of the Listener and EphemeralRunnerSet
patched := autoscalingRunnerSet.DeepCopy() patched := autoscalingRunnerSet.DeepCopy()
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Spec.Template.Spec = corev1.PodSpec{ patched.Spec.Template.Spec = corev1.PodSpec{
Containers: []corev1.Container{ Containers: []corev1.Container{
{ {
@@ -501,7 +558,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
}, },
}, },
} }
// patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy() autoscalingRunnerSet = patched.DeepCopy()
@@ -698,7 +754,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err return "", err
} }
if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok { if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
return val, nil return val, nil
} }
@@ -722,7 +778,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
return "", err return "", err
} }
if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok { if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
return val, nil return val, nil
} }

View File

@@ -39,7 +39,11 @@ const (
// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running // Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection" const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name" const (
AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
AnnotationKeyGitHubRunnerScaleSetName = "actions.github.com/runner-scale-set-name"
AnnotationKeyPatchID = "actions.github.com/patch-id"
)
// Labels applied to listener roles // Labels applied to listener roles
const ( const (
@@ -66,3 +70,9 @@ const DefaultScaleSetListenerLogFormat = string(logging.LogFormatText)
// ownerKey is field selector matching the owner name of a particular resource // ownerKey is field selector matching the owner name of a particular resource
const resourceOwnerKey = ".metadata.controller" const resourceOwnerKey = ".metadata.controller"
// EphemeralRunner pod creation failure reasons
const (
ReasonTooManyPodFailures = "TooManyPodFailures"
ReasonInvalidPodFailure = "InvalidPod"
)
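These reasons are surfaced on EphemeralRunner.Status.Reason (see the markAsFailed change later in this diff), so callers can tell why a runner was marked failed. A small sketch of consuming them, assuming runner is a populated *v1alpha1.EphemeralRunner:

// Sketch only: branching on the failure reason recorded in Status.Reason.
switch runner.Status.Reason {
case ReasonTooManyPodFailures:
	// the pod repeatedly failed to start; likely an environment problem
case ReasonInvalidPodFailure:
	// the rendered pod spec was rejected by the API server; fix the template
}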

View File

@@ -21,7 +21,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"strings"
"time" "time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -133,6 +132,23 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
if ephemeralRunner.IsDone() {
log.Info("Cleaning up resources after after ephemeral runner termination", "phase", ephemeralRunner.Status.Phase)
done, err := r.cleanupResources(ctx, ephemeralRunner, log)
if err != nil {
log.Error(err, "Failed to clean up ephemeral runner owned resources")
return ctrl.Result{}, err
}
if !done {
log.Info("Waiting for ephemeral runner owned resources to be deleted")
return ctrl.Result{Requeue: true}, nil
}
// Stop reconciling on this object.
// The EphemeralRunnerSet is responsible for cleaning it up.
log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase)
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) { if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer") log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
@@ -159,13 +175,6 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
if ephemeralRunner.Status.Phase == corev1.PodSucceeded || ephemeralRunner.Status.Phase == corev1.PodFailed {
// Stop reconciling on this object.
// The EphemeralRunnerSet is responsible for cleaning it up.
log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase)
return ctrl.Result{}, nil
}
if ephemeralRunner.Status.RunnerId == 0 { if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Creating new ephemeral runner registration and updating status with runner config") log.Info("Creating new ephemeral runner registration and updating status with runner config")
return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log) return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
@@ -192,7 +201,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
case len(ephemeralRunner.Status.Failures) > 5: case len(ephemeralRunner.Status.Failures) > 5:
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed") log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message) errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, log); err != nil { if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonTooManyPodFailures, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed") log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err return ctrl.Result{}, err
} }
@@ -201,7 +210,22 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
default: default:
// Pod was not found. Create if the pod has never been created // Pod was not found. Create if the pod has never been created
log.Info("Creating new EphemeralRunner pod.") log.Info("Creating new EphemeralRunner pod.")
return r.createPod(ctx, ephemeralRunner, secret, log) result, err := r.createPod(ctx, ephemeralRunner, secret, log)
switch {
case err == nil:
return result, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
default:
log.Error(err, "Failed to create the pod")
return ctrl.Result{}, err
}
} }
} }
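The new createPod error handling treats Invalid and Forbidden API errors as unrecoverable and marks the runner failed instead of retrying. Both predicates come from k8s.io/apimachinery/pkg/api/errors; a quick sketch of how they classify a typical admission rejection (imports assumed: fmt, kerrors "k8s.io/apimachinery/pkg/api/errors", "k8s.io/apimachinery/pkg/runtime/schema"):

// Sketch only: kerrors.IsInvalid / kerrors.IsForbidden separate unrecoverable
// API rejections (bad pod spec, policy denial) from transient failures.
err := kerrors.NewForbidden(
	schema.GroupResource{Resource: "pods"},
	"invalid-ephemeral-runner",
	fmt.Errorf("no PriorityClass with name notexist was found"),
)
fmt.Println(kerrors.IsForbidden(err)) // true  -> createPod marks the runner Failed with ReasonInvalidPodFailure
fmt.Println(kerrors.IsInvalid(err))   // false -> this particular rejection is Forbidden, not Invalid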
@@ -270,14 +294,17 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
} }
func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) { func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
actionsError := &actions.ActionsError{} if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
err := r.deleteRunnerFromService(ctx, ephemeralRunner, log) actionsError := &actions.ActionsError{}
if err != nil { if !errors.As(err, &actionsError) {
if errors.As(err, &actionsError) && log.Error(err, "Failed to clean up runner from the service (not an ActionsError)")
actionsError.StatusCode == http.StatusBadRequest && return ctrl.Result{}, err
strings.Contains(actionsError.ExceptionName, "JobStillRunningException") { }
if actionsError.StatusCode == http.StatusBadRequest && actionsError.IsException("JobStillRunningException") {
log.Info("Runner is still running the job. Re-queue in 30 seconds") log.Info("Runner is still running the job. Re-queue in 30 seconds")
return ctrl.Result{RequeueAfter: 30 * time.Second}, nil return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
} }
log.Error(err, "Failed clean up runner from the service") log.Error(err, "Failed clean up runner from the service")
@@ -285,10 +312,9 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context
} }
log.Info("Successfully removed runner registration from service") log.Info("Successfully removed runner registration from service")
err = patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.RemoveFinalizer(obj, ephemeralRunnerActionsFinalizerName) controllerutil.RemoveFinalizer(obj, ephemeralRunnerActionsFinalizerName)
}) }); err != nil {
if err != nil {
return ctrl.Result{}, err return ctrl.Result{}, err
} }
@@ -309,7 +335,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
} }
} }
return false, nil return false, nil
case err != nil && !kerrors.IsNotFound(err): case !kerrors.IsNotFound(err):
return false, err return false, err
} }
log.Info("Pod is deleted") log.Info("Pod is deleted")
@@ -326,7 +352,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
} }
} }
return false, nil return false, nil
case err != nil && !kerrors.IsNotFound(err): case !kerrors.IsNotFound(err):
return false, err return false, err
} }
log.Info("Secret is deleted") log.Info("Secret is deleted")
@@ -424,11 +450,11 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
return false, multierr.Combine(errs...) return false, multierr.Combine(errs...)
} }
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, log logr.Logger) error { func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Failed") log.Info("Updating ephemeral runner status to Failed")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed obj.Status.Phase = corev1.PodFailed
obj.Status.Reason = "TooManyPodFailures" obj.Status.Reason = reason
obj.Status.Message = errMessage obj.Status.Message = errMessage
}); err != nil { }); err != nil {
return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err) return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err)
@@ -503,7 +529,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
} }
if actionsError.StatusCode != http.StatusConflict || if actionsError.StatusCode != http.StatusConflict ||
!strings.Contains(actionsError.ExceptionName, "AgentExistsException") { !actionsError.IsException("AgentExistsException") {
return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err) return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
} }
@@ -660,7 +686,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
return nil return nil
} }
log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message) log.Info("Updating ephemeral runner status with pod phase", "statusPhase", pod.Status.Phase, "statusReason", pod.Status.Reason, "statusMessage", pod.Status.Message)
err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = pod.Status.Phase obj.Status.Phase = pod.Status.Phase
obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning) obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning)
@@ -759,7 +785,7 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte
} }
if actionsError.StatusCode != http.StatusNotFound || if actionsError.StatusCode != http.StatusNotFound ||
!strings.Contains(actionsError.ExceptionName, "AgentNotFoundException") { !actionsError.IsException("AgentNotFoundException") {
return false, fmt.Errorf("failed to check if runner exists in GitHub service: %v", err) return false, fmt.Errorf("failed to check if runner exists in GitHub service: %v", err)
} }
@@ -789,7 +815,6 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context,
// SetupWithManager sets up the controller with the Manager. // SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
// TODO(nikola-jokic): Add indexing and filtering fields on corev1.Pod{}
return ctrl.NewControllerManagedBy(mgr). return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunner{}). For(&v1alpha1.EphemeralRunner{}).
Owns(&corev1.Pod{}). Owns(&corev1.Pod{}).

View File

@@ -189,6 +189,25 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(true)) ).Should(BeEquivalentTo(true))
}) })
It("It should failed if a pod template is invalid", func() {
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
err := k8sClient.Create(ctx, invalideEphemeralRunner)
Expect(err).To(BeNil())
updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (corev1.PodPhase, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace}, updated)
if err != nil {
return "", nil
}
return updated.Status.Phase, nil
}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
})
It("It should clean up resources when deleted", func() { It("It should clean up resources when deleted", func() {
// wait for pod to be created // wait for pod to be created
pod := new(corev1.Pod) pod := new(corev1.Pod)
@@ -652,8 +671,10 @@ var _ = Describe("EphemeralRunner", func() {
fake.WithGetRunner( fake.WithGetRunner(
nil, nil,
&actions.ActionsError{ &actions.ActionsError{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
ExceptionName: "AgentNotFoundException", Err: &actions.ActionsExceptionError{
ExceptionName: "AgentNotFoundException",
},
}, },
), ),
), ),
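The fake client now wraps the exception in an ActionsExceptionError instead of setting ExceptionName directly on ActionsError, which is what the IsException helper used elsewhere in this diff relies on. A rough sketch of the matching pattern, using only the fields and methods shown in this change (err being whatever the Actions client call returned):

// Sketch only: unwrapping an ActionsError and matching on the wrapped exception name.
var actionsError *actions.ActionsError
if errors.As(err, &actionsError) &&
	actionsError.StatusCode == http.StatusNotFound &&
	actionsError.IsException("AgentNotFoundException") {
	// the runner registration is already gone; treat it as cleaned up
}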

View File

@@ -22,7 +22,7 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"sort" "sort"
"strings" "strconv"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics" "github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
@@ -156,14 +156,14 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, err return ctrl.Result{}, err
} }
pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList) ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)
log.Info("Ephemeral runner counts", log.Info("Ephemeral runner counts",
"pending", len(pendingEphemeralRunners), "pending", len(ephemeralRunnerState.pending),
"running", len(runningEphemeralRunners), "running", len(ephemeralRunnerState.running),
"finished", len(finishedEphemeralRunners), "finished", len(ephemeralRunnerState.finished),
"failed", len(failedEphemeralRunners), "failed", len(ephemeralRunnerState.failed),
"deleting", len(deletingEphemeralRunners), "deleting", len(ephemeralRunnerState.deleting),
) )
if r.PublishMetrics { if r.PublishMetrics {
@@ -183,54 +183,56 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
Organization: parsedURL.Organization, Organization: parsedURL.Organization,
Enterprise: parsedURL.Enterprise, Enterprise: parsedURL.Enterprise,
}, },
len(pendingEphemeralRunners), len(ephemeralRunnerState.pending),
len(runningEphemeralRunners), len(ephemeralRunnerState.running),
len(failedEphemeralRunners), len(ephemeralRunnerState.failed),
) )
} }
// cleanup finished runners and proceed total := ephemeralRunnerState.scaleTotal()
var errs []error if ephemeralRunnerSet.Spec.PatchID == 0 || ephemeralRunnerSet.Spec.PatchID != ephemeralRunnerState.latestPatchID {
for i := range finishedEphemeralRunners { defer func() {
log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name) if err := r.cleanupFinishedEphemeralRunners(ctx, ephemeralRunnerState.finished, log); err != nil {
if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil { log.Error(err, "failed to cleanup finished ephemeral runners")
if !kerrors.IsNotFound(err) { }
errs = append(errs, err) }()
log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas)
switch {
case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up
count := ephemeralRunnerSet.Spec.Replicas - total
log.Info("Creating new ephemeral runners (scale up)", "count", count)
if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil {
log.Error(err, "failed to make ephemeral runner")
return ctrl.Result{}, err
} }
}
}
if len(errs) > 0 { case ephemeralRunnerSet.Spec.PatchID > 0 && total >= ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario.
mergedErrs := multierr.Combine(errs...) // If ephemeral runner did not yet update the phase to succeeded, but the scale down
log.Error(mergedErrs, "Failed to delete finished ephemeral runners") // request is issued, we should ignore the scale down request.
return ctrl.Result{}, mergedErrs // Eventually, the ephemeral runner will be cleaned up on the next patch request, which happens
} // on the next batch
case ephemeralRunnerSet.Spec.PatchID == 0 && total > ephemeralRunnerSet.Spec.Replicas:
total := len(pendingEphemeralRunners) + len(runningEphemeralRunners) + len(failedEphemeralRunners) count := total - ephemeralRunnerSet.Spec.Replicas
log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas) log.Info("Deleting ephemeral runners (scale down)", "count", count)
switch { if err := r.deleteIdleEphemeralRunners(
case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up ctx,
count := ephemeralRunnerSet.Spec.Replicas - total ephemeralRunnerSet,
log.Info("Creating new ephemeral runners (scale up)", "count", count) ephemeralRunnerState.pending,
if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil { ephemeralRunnerState.running,
log.Error(err, "failed to make ephemeral runner") count,
return ctrl.Result{}, err log,
} ); err != nil {
log.Error(err, "failed to delete idle runners")
case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario. return ctrl.Result{}, err
count := total - ephemeralRunnerSet.Spec.Replicas }
log.Info("Deleting ephemeral runners (scale down)", "count", count)
if err := r.deleteIdleEphemeralRunners(ctx, ephemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners, count, log); err != nil {
log.Error(err, "failed to delete idle runners")
return ctrl.Result{}, err
} }
} }
desiredStatus := v1alpha1.EphemeralRunnerSetStatus{ desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
CurrentReplicas: total, CurrentReplicas: total,
PendingEphemeralRunners: len(pendingEphemeralRunners), PendingEphemeralRunners: len(ephemeralRunnerState.pending),
RunningEphemeralRunners: len(runningEphemeralRunners), RunningEphemeralRunners: len(ephemeralRunnerState.running),
FailedEphemeralRunners: len(failedEphemeralRunners), FailedEphemeralRunners: len(ephemeralRunnerState.failed),
} }
// Update the status if needed. // Update the status if needed.
@@ -247,6 +249,21 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
// cleanup finished runners and proceed
var errs []error
for i := range finishedEphemeralRunners {
log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name)
if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil {
if !kerrors.IsNotFound(err) {
errs = append(errs, err)
}
}
}
return multierr.Combine(errs...)
}
func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error { func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil { if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil {
return nil return nil
@@ -284,19 +301,19 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
return true, nil return true, nil
} }
pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList) ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)
log.Info("Clean up runner counts", log.Info("Clean up runner counts",
"pending", len(pendingEphemeralRunners), "pending", len(ephemeralRunnerState.pending),
"running", len(runningEphemeralRunners), "running", len(ephemeralRunnerState.running),
"finished", len(finishedEphemeralRunners), "finished", len(ephemeralRunnerState.finished),
"failed", len(failedEphemeralRunners), "failed", len(ephemeralRunnerState.failed),
"deleting", len(deletingEphemeralRunners), "deleting", len(ephemeralRunnerState.deleting),
) )
log.Info("Cleanup finished or failed ephemeral runners") log.Info("Cleanup finished or failed ephemeral runners")
var errs []error var errs []error
for _, ephemeralRunner := range append(finishedEphemeralRunners, failedEphemeralRunners...) { for _, ephemeralRunner := range append(ephemeralRunnerState.finished, ephemeralRunnerState.failed...) {
log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name) log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name)
if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) { if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
errs = append(errs, err) errs = append(errs, err)
@@ -310,7 +327,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
} }
// avoid fetching the client if we have nothing left to do // avoid fetching the client if we have nothing left to do
if len(runningEphemeralRunners) == 0 && len(pendingEphemeralRunners) == 0 { if len(ephemeralRunnerState.running) == 0 && len(ephemeralRunnerState.pending) == 0 {
return false, nil return false, nil
} }
@@ -321,7 +338,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
log.Info("Cleanup pending or running ephemeral runners") log.Info("Cleanup pending or running ephemeral runners")
errs = errs[0:0] errs = errs[0:0]
for _, ephemeralRunner := range append(pendingEphemeralRunners, runningEphemeralRunners...) { for _, ephemeralRunner := range append(ephemeralRunnerState.pending, ephemeralRunnerState.running...) {
log.Info("Removing the ephemeral runner from the service", "name", ephemeralRunner.Name) log.Info("Removing the ephemeral runner from the service", "name", ephemeralRunner.Name)
_, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log) _, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log)
if err != nil { if err != nil {
@@ -414,6 +431,9 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
// When this happens, the next reconcile loop will try to delete the remaining ephemeral runners // When this happens, the next reconcile loop will try to delete the remaining ephemeral runners
// after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates. // after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates.
func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error { func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error {
if count <= 0 {
return nil
}
runners := newEphemeralRunnerStepper(pendingEphemeralRunners, runningEphemeralRunners) runners := newEphemeralRunnerStepper(pendingEphemeralRunners, runningEphemeralRunners)
if runners.len() == 0 { if runners.len() == 0 {
log.Info("No pending or running ephemeral runners running at this time for scale down") log.Info("No pending or running ephemeral runners running at this time for scale down")
@@ -427,12 +447,13 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
deletedCount := 0 deletedCount := 0
for runners.next() { for runners.next() {
ephemeralRunner := runners.object() ephemeralRunner := runners.object()
if ephemeralRunner.Status.RunnerId == 0 { isDone := ephemeralRunner.IsDone()
if !isDone && ephemeralRunner.Status.RunnerId == 0 {
log.Info("Skipping ephemeral runner since it is not registered yet", "name", ephemeralRunner.Name) log.Info("Skipping ephemeral runner since it is not registered yet", "name", ephemeralRunner.Name)
continue continue
} }
if ephemeralRunner.Status.JobRequestId > 0 { if !isDone && ephemeralRunner.Status.JobRequestId > 0 {
log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId) log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId)
continue continue
} }
@@ -458,10 +479,14 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) { func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) {
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil { if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
actionsError := &actions.ActionsError{} actionsError := &actions.ActionsError{}
if errors.As(err, &actionsError) && if !errors.As(err, &actionsError) {
actionsError.StatusCode == http.StatusBadRequest && log.Error(err, "failed to remove runner from the service", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId)
strings.Contains(actionsError.ExceptionName, "JobStillRunningException") { return false, err
// Runner is still running a job, proceed with the next one }
if actionsError.StatusCode == http.StatusBadRequest &&
actionsError.IsException("JobStillRunningException") {
log.Info("Runner is still running a job, skipping deletion", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId)
return false, nil return false, nil
} }
@@ -546,28 +571,6 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
// SetupWithManager sets up the controller with the Manager. // SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
// grab the job object, extract the owner...
ephemeralRunner := rawObj.(*v1alpha1.EphemeralRunner)
owner := metav1.GetControllerOf(ephemeralRunner)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "EphemeralRunnerSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr). return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunnerSet{}). For(&v1alpha1.EphemeralRunnerSet{}).
Owns(&v1alpha1.EphemeralRunner{}). Owns(&v1alpha1.EphemeralRunner{}).
@@ -580,16 +583,22 @@ type ephemeralRunnerStepper struct {
index int index int
} }
func newEphemeralRunnerStepper(pending, running []*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper { func newEphemeralRunnerStepper(primary []*v1alpha1.EphemeralRunner, othersOrdered ...[]*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper {
sort.Slice(pending, func(i, j int) bool { sort.Slice(primary, func(i, j int) bool {
return pending[i].GetCreationTimestamp().Time.Before(pending[j].GetCreationTimestamp().Time) return primary[i].GetCreationTimestamp().Time.Before(primary[j].GetCreationTimestamp().Time)
})
sort.Slice(running, func(i, j int) bool {
return running[i].GetCreationTimestamp().Time.Before(running[j].GetCreationTimestamp().Time)
}) })
for _, bucket := range othersOrdered {
sort.Slice(bucket, func(i, j int) bool {
return bucket[i].GetCreationTimestamp().Time.Before(bucket[j].GetCreationTimestamp().Time)
})
}
for _, bucket := range othersOrdered {
primary = append(primary, bucket...)
}
return &ephemeralRunnerStepper{ return &ephemeralRunnerStepper{
items: append(pending, running...), items: primary,
index: -1, index: -1,
} }
} }
@@ -613,28 +622,48 @@ func (s *ephemeralRunnerStepper) len() int {
return len(s.items) return len(s.items)
} }
func categorizeEphemeralRunners(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) (pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners []*v1alpha1.EphemeralRunner) { type ephemeralRunnerState struct {
pending []*v1alpha1.EphemeralRunner
running []*v1alpha1.EphemeralRunner
finished []*v1alpha1.EphemeralRunner
failed []*v1alpha1.EphemeralRunner
deleting []*v1alpha1.EphemeralRunner
latestPatchID int
}
func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) *ephemeralRunnerState {
var ephemeralRunnerState ephemeralRunnerState
for i := range ephemeralRunnerList.Items { for i := range ephemeralRunnerList.Items {
r := &ephemeralRunnerList.Items[i] r := &ephemeralRunnerList.Items[i]
patchID, err := strconv.Atoi(r.Annotations[AnnotationKeyPatchID])
if err == nil && patchID > ephemeralRunnerState.latestPatchID {
ephemeralRunnerState.latestPatchID = patchID
}
if !r.ObjectMeta.DeletionTimestamp.IsZero() { if !r.ObjectMeta.DeletionTimestamp.IsZero() {
deletingEphemeralRunners = append(deletingEphemeralRunners, r) ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
continue continue
} }
switch r.Status.Phase { switch r.Status.Phase {
case corev1.PodRunning: case corev1.PodRunning:
runningEphemeralRunners = append(runningEphemeralRunners, r) ephemeralRunnerState.running = append(ephemeralRunnerState.running, r)
case corev1.PodSucceeded: case corev1.PodSucceeded:
finishedEphemeralRunners = append(finishedEphemeralRunners, r) ephemeralRunnerState.finished = append(ephemeralRunnerState.finished, r)
case corev1.PodFailed: case corev1.PodFailed:
failedEphemeralRunners = append(failedEphemeralRunners, r) ephemeralRunnerState.failed = append(ephemeralRunnerState.failed, r)
default: default:
// Pending or no phase should be considered as pending. // Pending or no phase should be considered as pending.
// //
// If field is not set, that means that the EphemeralRunner // If field is not set, that means that the EphemeralRunner
// did not yet have chance to update the Status.Phase field. // did not yet have chance to update the Status.Phase field.
pendingEphemeralRunners = append(pendingEphemeralRunners, r) ephemeralRunnerState.pending = append(ephemeralRunnerState.pending, r)
} }
} }
return return &ephemeralRunnerState
}
func (s *ephemeralRunnerState) scaleTotal() int {
return len(s.pending) + len(s.running) + len(s.failed)
} }

View File

@@ -18,6 +18,9 @@ const defaultGitHubToken = "gh_token"
func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) { func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) {
for _, mgr := range append([]manager.Manager{first}, others...) { for _, mgr := range append([]manager.Manager{first}, others...) {
if err := SetupIndexers(mgr); err != nil {
t.Fatalf("failed to setup indexers: %v", err)
}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
g, ctx := errgroup.WithContext(ctx) g, ctx := errgroup.WithContext(ctx)

View File

@@ -0,0 +1,71 @@
package actionsgithubcom
import (
"context"
"slices"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func SetupIndexers(mgr ctrl.Manager) error {
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&corev1.Pod{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("AutoscalingListener", "EphemeralRunner"),
); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&corev1.ServiceAccount{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("AutoscalingListener"),
); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&v1alpha1.EphemeralRunnerSet{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("AutoscalingRunnerSet"),
); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&v1alpha1.EphemeralRunner{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("EphemeralRunnerSet"),
); err != nil {
return err
}
return nil
}
func newGroupVersionOwnerKindIndexer(ownerKind string, otherOwnerKinds ...string) client.IndexerFunc {
owners := append([]string{ownerKind}, otherOwnerKinds...)
return func(o client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOfNoCopy(o)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || !slices.Contains(owners, owner.Kind) {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
}
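A minimal sketch of the kind of lookup these indexes enable, assuming a reconciler with a controller-runtime client `r` and an `autoscalingListener` object (both names are illustrative, not part of this change):

var pods corev1.PodList
err := r.List(ctx, &pods,
    client.InNamespace(autoscalingListener.Namespace),
    client.MatchingFields{resourceOwnerKey: autoscalingListener.Name},
)
// pods now contains only Pods whose controller owner is this listener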

View File

@@ -38,8 +38,11 @@ var commonLabelKeys = [...]string{
const labelValueKubernetesPartOf = "gha-runner-scale-set"

var (
    scaleSetListenerLogLevel   = DefaultScaleSetListenerLogLevel
    scaleSetListenerLogFormat  = DefaultScaleSetListenerLogFormat
    scaleSetListenerEntrypoint = "/ghalistener"
)
func SetListenerLoggingParameters(level string, format string) bool {
    switch level {
@@ -59,6 +62,12 @@ func SetListenerLoggingParameters(level string, format string) bool {
    return true
}
func SetListenerEntrypoint(entrypoint string) {
if entrypoint != "" {
scaleSetListenerEntrypoint = entrypoint
}
}
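A minimal sketch of how the override could be wired up at startup (the environment variable name here is an assumption for illustration only):

if entrypoint := os.Getenv("LISTENER_ENTRYPOINT"); entrypoint != "" {
    SetListenerEntrypoint(entrypoint)
}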
type resourceBuilder struct{}

func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
@@ -76,13 +85,17 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
    effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}

labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
    LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
    LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
    LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
    LabelKeyKubernetesComponent:     "runner-scale-set-listener",
    LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
})

annotations := map[string]string{
    annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
    annotationKeyValuesHash:     autoscalingRunnerSet.Annotations[annotationKeyValuesHash],
}

if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
@@ -91,9 +104,10 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
autoscalingListener := &v1alpha1.AutoscalingListener{
    ObjectMeta: metav1.ObjectMeta{
        Name:        scaleSetListenerName(autoscalingRunnerSet),
        Namespace:   namespace,
        Labels:      labels,
        Annotations: annotations,
    },
    Spec: v1alpha1.AutoscalingListenerSpec{
        GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
@@ -217,6 +231,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
    ports = append(ports, port)
}

terminationGracePeriodSeconds := int64(60)
podSpec := corev1.PodSpec{
    ServiceAccountName: serviceAccount.Name,
    Containers: []corev1.Container{
@@ -225,7 +240,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
Image:   autoscalingListener.Spec.Image,
Env:     listenerEnv,
Command: []string{
    scaleSetListenerEntrypoint,
},
Ports: ports,
VolumeMounts: []corev1.VolumeMount{
@@ -247,8 +262,9 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
            },
        },
    },
    ImagePullSecrets:              autoscalingListener.Spec.ImagePullSecrets,
    RestartPolicy:                 corev1.RestartPolicyNever,
    TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
}

labels := make(map[string]string, len(autoscalingListener.Labels))
@@ -300,7 +316,7 @@ func mergeListenerPodWithTemplate(pod *corev1.Pod, tmpl *corev1.PodTemplateSpec)
c := &tmpl.Spec.Containers[i]
switch c.Name {
case autoscalingListenerContainerName:
    mergeListenerContainer(listenerContainer, c)
default:
    pod.Spec.Containers = append(pod.Spec.Containers, *c)
@@ -395,10 +411,10 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener
    ObjectMeta: metav1.ObjectMeta{
        Name:      scaleSetListenerServiceAccountName(autoscalingListener),
        Namespace: autoscalingListener.Namespace,
        Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
            LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
            LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
        }),
    },
}
}
@@ -410,13 +426,13 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
ObjectMeta: metav1.ObjectMeta{
    Name:      scaleSetListenerRoleName(autoscalingListener),
    Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
    Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
        LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
        LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
        labelKeyListenerNamespace:       autoscalingListener.Namespace,
        labelKeyListenerName:            autoscalingListener.Name,
        "role-policy-rules-hash":        rulesHash,
    }),
},
Rules: rules,
}
@@ -444,14 +460,14 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
ObjectMeta: metav1.ObjectMeta{
    Name:      scaleSetListenerRoleName(autoscalingListener),
    Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
    Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
        LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
        LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
        labelKeyListenerNamespace:       autoscalingListener.Namespace,
        labelKeyListenerName:            autoscalingListener.Name,
        "role-binding-role-ref-hash":    roleRefHash,
        "role-binding-subject-hash":     subjectHash,
    }),
},
RoleRef:  roleRef,
Subjects: subjects,
@@ -467,11 +483,11 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
ObjectMeta: metav1.ObjectMeta{
    Name:      scaleSetListenerSecretMirrorName(autoscalingListener),
    Namespace: autoscalingListener.Namespace,
    Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
        LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
        LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName,
        "secret-data-hash":              dataHash,
    }),
},
Data: secret.DeepCopy().Data,
}
@@ -486,21 +502,23 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
}

runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
    LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
    LabelKeyKubernetesComponent:     "runner-set",
    LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
    LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
    LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
})

if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
    return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
}

newAnnotations := map[string]string{
    AnnotationKeyGitHubRunnerGroupName:    autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
    AnnotationKeyGitHubRunnerScaleSetName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName],
    annotationKeyRunnerSpecHash:           runnerSpecHash,
}

newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
@@ -529,22 +547,19 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
    labels := make(map[string]string)
    for k, v := range ephemeralRunnerSet.Labels {
        if k == LabelKeyKubernetesComponent {
            labels[k] = "runner"
        } else {
            labels[k] = v
        }
    }

    annotations := make(map[string]string)
    for key, val := range ephemeralRunnerSet.Annotations {
        annotations[key] = val
    }
    annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)

    return &v1alpha1.EphemeralRunner{
        TypeMeta: metav1.TypeMeta{},
        ObjectMeta: metav1.ObjectMeta{
@@ -732,3 +747,17 @@ func trimLabelValue(val string) string {
    }
    return val
}
func mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := map[string]string{}
for k, v := range base {
mergedLabels[k] = v
}
for k, v := range overwrite {
mergedLabels[k] = v
}
return mergedLabels
}
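For illustration (the values below are made up), keys in the second argument win on conflict:

merged := mergeLabels(
    map[string]string{"app": "demo", "team": "ci"},
    map[string]string{"team": "platform"},
)
// merged == map[string]string{"app": "demo", "team": "platform"}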

View File

@@ -21,10 +21,12 @@ func TestLabelPropagation(t *testing.T) {
    Labels: map[string]string{
        LabelKeyKubernetesPartOf:  labelValueKubernetesPartOf,
        LabelKeyKubernetesVersion: "0.2.0",
        "arbitrary-label":         "random-value",
    },
    Annotations: map[string]string{
        runnerScaleSetIdAnnotationKey:         "1",
        AnnotationKeyGitHubRunnerGroupName:    "test-group",
        AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
    },
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
@@ -38,25 +40,28 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Annotations[annotationKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], ephemeralRunnerSet.Labels["arbitrary-label"])

listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Annotations[annotationKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"])

listenerServiceAccount := &corev1.ServiceAccount{
    ObjectMeta: metav1.ObjectMeta{
@@ -83,6 +88,7 @@ func TestLabelPropagation(t *testing.T) {
}

assert.Equal(t, "runner", ephemeralRunner.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunner.Annotations[AnnotationKeyGitHubRunnerGroupName])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName])

runnerSecret := &corev1.Secret{
    ObjectMeta: metav1.ObjectMeta{
@@ -109,8 +115,9 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
LabelKeyKubernetesVersion: "0.2.0", LabelKeyKubernetesVersion: "0.2.0",
}, },
Annotations: map[string]string{ Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1", runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group", AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
}, },
}, },
} }

View File

@@ -1,5 +1,8 @@
# About ARC
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Introduction
This document provides a high-level overview of Actions Runner Controller (ARC). ARC enables running GitHub Actions Runners on Kubernetes (K8s) clusters.

View File

@@ -1,6 +1,6 @@
# Changing semantics of the `minRunners` field
**Status**: Accepted
## Context

View File

@@ -1,5 +1,8 @@
# Authenticating to the GitHub API
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Setting Up Authentication with GitHub API
There are two ways for actions-runner-controller to authenticate with the GitHub API (only 1 can be configured at a time however):

View File

@@ -1,5 +1,8 @@
# Automatically scaling runners
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Overview
> If you are using controller version < [v0.22.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.22.0) and you are not using GHES, and so you can't set your rate limit budget, it is recommended that you use 100 replicas or fewer to prevent being rate limited.

View File

@@ -1,5 +1,8 @@
# Adding ARC runners to a repository, organization, or enterprise
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Usage
[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners#about-self-hosted-runners):

View File

@@ -1,5 +1,8 @@
# Configuring Windows runners
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Setting up Windows Runners
The main two steps in enabling Windows self-hosted runners are:

View File

@@ -1,5 +1,8 @@
# Deploying alternative runners
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Alternative Runners
ARC also offers a few alternative runner options

View File

@@ -1,5 +1,8 @@
# Deploying ARC runners
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Deploying runners with RunnerDeployments
In our previous examples we were deploying a single runner via the `RunnerDeployment` kind, the amount of runners deployed can be statically set via the `replicas:` field, we can increase this value to deploy additional sets of runners instead:

View File

@@ -43,6 +43,79 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
## Changelog
### v0.9.2
1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529)
1. Re-use the last desired patch on empty batch [#3453](https://github.com/actions/actions-runner-controller/pull/3453)
1. Extract single place to set up indexers [#3454](https://github.com/actions/actions-runner-controller/pull/3454)
1. Include controller version in logs [#3473](https://github.com/actions/actions-runner-controller/pull/3473)
1. Propagate arbitrary labels from runnersets to all created resources [#3157](https://github.com/actions/actions-runner-controller/pull/3157)
### v0.9.1
#### Major changes
1. Shutdown metrics server when listener exits [#3445](https://github.com/actions/actions-runner-controller/pull/3445)
1. Propagate max capacity information to the actions back-end [#3431](https://github.com/actions/actions-runner-controller/pull/3431)
1. Refactor actions client error to include request id [#3430](https://github.com/actions/actions-runner-controller/pull/3430)
1. Include self correction on empty batch and avoid removing pending runners when cluster is busy [#3426](https://github.com/actions/actions-runner-controller/pull/3426)
1. Add topologySpreadConstraint to gha-runner-scale-set-controller chart [#3405](https://github.com/actions/actions-runner-controller/pull/3405)
### v0.9.0
#### ⚠️ Warning
- This release contains CRD changes. During the upgrade, please remove the old CRDs before re-installing the new version. For more information, please read the [Upgrading ARC](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#upgrading-arc) guide.
- This release changes the [default docker socket path](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#upgrading-arc) used when container mode `dind` is expanded.
- The older version of the listener (`githubrunnerscalesetlistener`) is deprecated and will be removed in the upcoming `0.10.0` release.
Please evaluate these changes carefully before upgrading.
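For example (illustrative only, not taken from the release notes): runner pod templates or workflow steps that hard-coded the previous `dind` socket location should be updated to reference `/var/run/docker.sock` after upgrading.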
#### Major changes
1. Change docker socket path to /var/run/docker.sock [#3337](https://github.com/actions/actions-runner-controller/pull/3337)
1. Update metrics to include repository on job-based label [#3310](https://github.com/actions/actions-runner-controller/pull/3310)
1. Bump Go version to 1.22.1 [#3290](https://github.com/actions/actions-runner-controller/pull/3290)
1. Propagate runner scale set name annotation to EphemeralRunner [#3098](https://github.com/actions/actions-runner-controller/pull/3098)
1. Add annotation with values hash to re-create listener [#3195](https://github.com/actions/actions-runner-controller/pull/3195)
1. Fix overscaling when the controller is much faster than the listener [#3371](https://github.com/actions/actions-runner-controller/pull/3371)
1. Add retry on 401 and 403 for runner-registration [#3377](https://github.com/actions/actions-runner-controller/pull/3377)
### v0.8.3
1. Expose volumeMounts and volumes in gha-runner-scale-set-controller [#3260](https://github.com/actions/actions-runner-controller/pull/3260)
1. Refer to the correct variable in discovery error message [#3296](https://github.com/actions/actions-runner-controller/pull/3296)
1. Fix acquire jobs after session refresh ghalistener [#3307](https://github.com/actions/actions-runner-controller/pull/3307)
### v0.8.2
1. Add listener graceful termination period and background context after the message is received [#3187](https://github.com/actions/actions-runner-controller/pull/3187)
1. Publish metrics in the new ghalistener [#3193](https://github.com/actions/actions-runner-controller/pull/3193)
1. Delete message session when listener.Listen returns [#3240](https://github.com/actions/actions-runner-controller/pull/3240)
### v0.8.1
1. Fix proxy issue in new listener client [#3181](https://github.com/actions/actions-runner-controller/pull/3181)
### v0.8.0
1. Change listener container name [#3167](https://github.com/actions/actions-runner-controller/pull/3167)
1. Fix empty env and volumeMounts object on default setup [#3166](https://github.com/actions/actions-runner-controller/pull/3166)
1. Fix override listener pod spec [#3161](https://github.com/actions/actions-runner-controller/pull/3161)
1. Change minRunners behavior and fix the new listener min runners [#3139](https://github.com/actions/actions-runner-controller/pull/3139)
1. Update user agent for new ghalistener [#3138](https://github.com/actions/actions-runner-controller/pull/3138)
1. Bump golang.org/x/oauth2 from 0.14.0 to 0.15.0 [#3127](https://github.com/actions/actions-runner-controller/pull/3127)
1. Bump golang.org.x.net from 0.18.0 to 0.19.0 [#3126](https://github.com/actions/actions-runner-controller/pull/3126)
1. Bump k8s.io/client-go from 0.28.3 to 0.28.4 [#3125](https://github.com/actions/actions-runner-controller/pull/3125)
1. Modify user agent format with subsystem and is proxy configured information [#3116](https://github.com/actions/actions-runner-controller/pull/3116)
1. Record the error when the creation pod fails [#3112](https://github.com/actions/actions-runner-controller/pull/3112)
1. Fix typo in helm chart comment [#3104](https://github.com/actions/actions-runner-controller/pull/3104)
1. Set actions client timeout to 5 minutes, add logging to client [#3103](https://github.com/actions/actions-runner-controller/pull/3103)
1. Refactor listener app with configurable fallback [#3096](https://github.com/actions/actions-runner-controller/pull/3096)
1. Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 [#3094](https://github.com/actions/actions-runner-controller/pull/3094)
1. Bump k8s.io/api from 0.28.3 to 0.28.4 [#3093](https://github.com/actions/actions-runner-controller/pull/3093)
1. Bump k8s.io/apimachinery from 0.28.3 to 0.28.4 [#3092](https://github.com/actions/actions-runner-controller/pull/3092)
1. Bump github.com/gruntwork-io/terratest from 0.41.24 to 0.46.7 [#3091](https://github.com/actions/actions-runner-controller/pull/3091)
1. Record a reason for pod failure in EphemeralRunner [#3074](https://github.com/actions/actions-runner-controller/pull/3074)
1. ADR: Changing semantics of min runners to be min idle runners [#3040](https://github.com/actions/actions-runner-controller/pull/3040)
### v0.7.0
1. Add ResizePolicy and RestartPolicy on mergeListenerContainer [#3075](https://github.com/actions/actions-runner-controller/pull/3075)
1. feat: GHA controller Helm Chart quoted labels [#3061](https://github.com/actions/actions-runner-controller/pull/3061)

View File

@@ -12,4 +12,4 @@ We do not intend to provide a supported ARC dashboard. This is simply a referenc
1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners.
3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json) into Grafana.

View File

@@ -1,5 +1,8 @@
# Installing ARC
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Installation
By default, actions-runner-controller uses [cert-manager](https://cert-manager.io/docs/installation/kubernetes/) for certificate management of Admission Webhook. Make sure you have already installed cert-manager before you install. The installation instructions for the cert-manager can be found below.

View File

@@ -1,5 +1,8 @@
# Managing access with runner groups
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Runner Groups
Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.

View File

@@ -1,5 +1,8 @@
# Monitoring and troubleshooting
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Metrics
The controller also exposes Prometheus metrics on a `/metrics` endpoint. By default this is on port `8443` behind an RBAC proxy.

View File

@@ -1,5 +1,8 @@
# Actions Runner Controller Quickstart
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
GitHub Actions automates the deployment of code to different environments, including production. The environments contain the `GitHub Runner` software which executes the automation. `GitHub Runner` can be run in GitHub-hosted cloud or self-hosted environments. Self-hosted environments offer more control of hardware, operating system, and software tools. They can be run on physical machines, virtual machines, or in a container. Containerized environments are lightweight, loosely coupled, highly efficient and can be managed centrally. However, they are not straightforward to use.
`Actions Runner Controller (ARC)` makes it simpler to run self hosted environments on Kubernetes(K8s) cluster.

View File

@@ -1,5 +1,8 @@
# Using ARC across organizations
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Multitenancy
> This feature requires controller version => [v0.26.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.26.0)

View File

@@ -1,5 +1,8 @@
# Using ARC runners in a workflow
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Runner Labels
To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow:

View File

@@ -1,5 +1,8 @@
# Using custom volumes
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Custom Volume mounts
You can configure your own custom volume mounts. For example to have the work/docker data in memory or on NVME SSD, for

View File

@@ -1,5 +1,8 @@
# Using entrypoint features # Using entrypoint features
> [!WARNING]
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
## Runner Entrypoint Features
> Environment variable values must all be strings

View File

@@ -29,6 +29,9 @@ const (
apiVersionQueryParam = "api-version=6.0-preview" apiVersionQueryParam = "api-version=6.0-preview"
) )
// Header used to propagate capacity information to the back-end
const HeaderScaleSetMaxCapacity = "X-ScaleSetMaxCapacity"
//go:generate mockery --inpackage --name=ActionsService
type ActionsService interface {
    GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error)
@@ -45,7 +48,7 @@ type ActionsService interface {
    AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
    GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*AcquirableJobList, error)
    GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error)
    DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
    GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error)
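A hedged sketch of a caller passing the new capacity hint, which the client forwards in the X-ScaleSetMaxCapacity header (the session fields and the maxRunners variable are assumptions for illustration):

msg, err := actionsClient.GetMessage(ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, lastMessageID, maxRunners)
if err != nil {
    // a MessageQueueTokenExpiredError can be handled by refreshing the session; other errors bubble up
}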
@@ -104,28 +107,38 @@ type Client struct {
    proxyFunc ProxyFunc
}

var _ ActionsService = &Client{}

type ProxyFunc func(req *http.Request) (*url.URL, error)

type ClientOption func(*Client)

type UserAgentInfo struct {
    // Version is the version of the controller
    Version string
    // CommitSHA is the git commit SHA of the controller
    CommitSHA string
    // ScaleSetID is the ID of the scale set
    ScaleSetID int
    // HasProxy is true if the controller is running behind a proxy
    HasProxy bool
    // Subsystem is the subsystem such as listener, controller, etc.
    // Each system may pick its own subsystem name.
    Subsystem string
}
func (u UserAgentInfo) String() string {
    scaleSetID := "NA"
    if u.ScaleSetID > 0 {
        scaleSetID = strconv.Itoa(u.ScaleSetID)
    }

    proxy := "Proxy/disabled"
    if u.HasProxy {
        proxy = "Proxy/enabled"
    }

    return fmt.Sprintf("actions-runner-controller/%s (%s; %s) ScaleSetID/%s (%s)", u.Version, u.CommitSHA, u.Subsystem, scaleSetID, proxy)
}
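With illustrative values (Version "0.9.2", CommitSHA "3be7128", Subsystem "listener", ScaleSetID 7, no proxy), the resulting header would read: actions-runner-controller/0.9.2 (3be7128; listener) ScaleSetID/7 (Proxy/disabled).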
func WithLogger(logger logr.Logger) ClientOption {
@@ -347,15 +360,22 @@ func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runne
}

var runnerScaleSetList *runnerScaleSetsResponse
if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSetList); err != nil {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        err,
    }
}

if runnerScaleSetList.Count == 0 {
    return nil, nil
}

if runnerScaleSetList.Count > 1 {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        fmt.Errorf("multiple runner scale sets found with name %q", runnerScaleSetName),
    }
}

return &runnerScaleSetList.RunnerScaleSets[0], nil
@@ -378,9 +398,12 @@ func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int
}

var runnerScaleSet *RunnerScaleSet
if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSet); err != nil {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        err,
    }
}

return runnerScaleSet, nil
}
@@ -400,23 +423,43 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (
if resp.StatusCode != http.StatusOK {
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, &ActionsError{
            StatusCode: resp.StatusCode,
            ActivityID: resp.Header.Get(HeaderActionsActivityID),
            Err:        err,
        }
    }
    return nil, fmt.Errorf("unexpected status code: %w", &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        errors.New(string(body)),
    })
}

var runnerGroupList *RunnerGroupList
err = json.NewDecoder(resp.Body).Decode(&runnerGroupList)
if err != nil {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        err,
    }
}

if runnerGroupList.Count == 0 {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        fmt.Errorf("no runner group found with name %q", runnerGroup),
    }
}

if runnerGroupList.Count > 1 {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        fmt.Errorf("multiple runner group found with name %q", runnerGroup),
    }
}

return &runnerGroupList.RunnerGroups[0], nil
@@ -442,9 +485,12 @@ func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *Runne
    return nil, ParseActionsErrorFromResponse(resp)
}

var createdRunnerScaleSet *RunnerScaleSet
if err := json.NewDecoder(resp.Body).Decode(&createdRunnerScaleSet); err != nil {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        err,
    }
}
return createdRunnerScaleSet, nil
}
@@ -472,9 +518,12 @@ func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int,
}

var updatedRunnerScaleSet *RunnerScaleSet
if err := json.NewDecoder(resp.Body).Decode(&updatedRunnerScaleSet); err != nil {
    return nil, &ActionsError{
        StatusCode: resp.StatusCode,
        ActivityID: resp.Header.Get(HeaderActionsActivityID),
        Err:        err,
    }
}
return updatedRunnerScaleSet, nil
}
@@ -499,7 +548,7 @@ func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int)
    return nil
}

func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) {
    u, err := url.Parse(messageQueueUrl)
    if err != nil {
        return nil, err
@@ -511,6 +560,10 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc
    u.RawQuery = q.Encode()
}

if maxCapacity < 0 {
    return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
}

req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
    return nil, err
@@ -519,6 +572,7 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc
req.Header.Set("Accept", "application/json; api-version=6.0-preview") req.Header.Set("Accept", "application/json; api-version=6.0-preview")
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken)) req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken))
req.Header.Set("User-Agent", c.userAgent.String()) req.Header.Set("User-Agent", c.userAgent.String())
req.Header.Set(HeaderScaleSetMaxCapacity, strconv.Itoa(maxCapacity))
resp, err := c.Do(req) resp, err := c.Do(req)
if err != nil { if err != nil {
@@ -539,15 +593,26 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc
         body, err := io.ReadAll(resp.Body)
         body = trimByteOrderMark(body)
         if err != nil {
-            return nil, err
+            return nil, &ActionsError{
+                ActivityID: resp.Header.Get(HeaderActionsActivityID),
+                StatusCode: resp.StatusCode,
+                Err:        err,
+            }
         }
-        return nil, &MessageQueueTokenExpiredError{msg: string(body)}
+        return nil, &MessageQueueTokenExpiredError{
+            activityID: resp.Header.Get(HeaderActionsActivityID),
+            statusCode: resp.StatusCode,
+            msg:        string(body),
+        }
     }

     var message *RunnerScaleSetMessage
-    err = json.NewDecoder(resp.Body).Decode(&message)
-    if err != nil {
-        return nil, err
+    if err := json.NewDecoder(resp.Body).Decode(&message); err != nil {
+        return nil, &ActionsError{
+            StatusCode: resp.StatusCode,
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            Err:        err,
+        }
     }
     return message, nil
 }
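Aside (not part of the diff): with the typed errors above, a caller can tell an expired message-queue token (refresh the session and retry) apart from other Actions service failures. A hedged sketch, with the refresh step left abstract and the import path assumed:

```go
package sketch // illustration only

import (
	"context"
	"errors"
	"fmt"

	"github.com/actions/actions-runner-controller/github/actions" // assumed import path
)

func getMessageWithDiagnostics(ctx context.Context, client *actions.Client, queueURL, token string, lastID int64, capacity int) (*actions.RunnerScaleSetMessage, error) {
	msg, err := client.GetMessage(ctx, queueURL, token, lastID, capacity)
	if err == nil {
		return msg, nil
	}

	var expired *actions.MessageQueueTokenExpiredError
	if errors.As(err, &expired) {
		// A real caller would refresh the message session here and retry (omitted).
		return nil, fmt.Errorf("queue token expired, session refresh required: %w", err)
	}

	var svcErr *actions.ActionsError
	if errors.As(err, &svcErr) {
		// Status code and activity ID are now attached for logging and support requests.
		return nil, fmt.Errorf("actions service error (status %d, activity %s): %w", svcErr.StatusCode, svcErr.ActivityID, err)
	}
	return nil, err
}
```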
@@ -583,9 +648,17 @@ func (c *Client) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueu
         body, err := io.ReadAll(resp.Body)
         body = trimByteOrderMark(body)
         if err != nil {
-            return err
+            return &ActionsError{
+                ActivityID: resp.Header.Get(HeaderActionsActivityID),
+                StatusCode: resp.StatusCode,
+                Err:        err,
+            }
         }
-        return &MessageQueueTokenExpiredError{msg: string(body)}
+        return &MessageQueueTokenExpiredError{
+            activityID: resp.Header.Get(HeaderActionsActivityID),
+            statusCode: resp.StatusCode,
+            msg:        string(body),
+        }
     }
     return nil
 }
@@ -632,8 +705,20 @@ func (c *Client) doSessionRequest(ctx context.Context, method, path string, requ
         return err
     }

-    if resp.StatusCode == expectedResponseStatusCode && responseUnmarshalTarget != nil {
-        return json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget)
+    if resp.StatusCode == expectedResponseStatusCode {
+        if responseUnmarshalTarget == nil {
+            return nil
+        }
+
+        if err := json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget); err != nil {
+            return &ActionsError{
+                StatusCode: resp.StatusCode,
+                ActivityID: resp.Header.Get(HeaderActionsActivityID),
+                Err:        err,
+            }
+        }
+
+        return nil
     }

     if resp.StatusCode >= 400 && resp.StatusCode < 500 {
@@ -644,10 +729,18 @@ func (c *Client) doSessionRequest(ctx context.Context, method, path string, requ
     body, err := io.ReadAll(resp.Body)
     body = trimByteOrderMark(body)
     if err != nil {
-        return err
+        return &ActionsError{
+            StatusCode: resp.StatusCode,
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            Err:        err,
+        }
     }
-    return fmt.Errorf("unexpected status code: %d - body: %s", resp.StatusCode, string(body))
+    return fmt.Errorf("unexpected status code: %w", &ActionsError{
+        StatusCode: resp.StatusCode,
+        ActivityID: resp.Header.Get(HeaderActionsActivityID),
+        Err:        errors.New(string(body)),
+    })
 }

 func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
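Aside (illustrative): because the unexpected-status branch wraps the typed error with `%w`, `errors.As` still reaches the `*ActionsError` through the `fmt.Errorf` wrapper. A small sketch (import path assumed):

```go
package sketch // illustration only

import (
	"errors"

	"github.com/actions/actions-runner-controller/github/actions" // assumed import path
)

// statusOf reports the HTTP status carried by a (possibly wrapped) *ActionsError.
// errors.As unwraps fmt.Errorf("...: %w", ...) layers, so the status stays reachable.
func statusOf(err error) (int, bool) {
	var svcErr *actions.ActionsError
	if errors.As(err, &svcErr) {
		return svcErr.StatusCode, true
	}
	return 0, false
}
```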
@@ -681,16 +774,28 @@ func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQ
         body, err := io.ReadAll(resp.Body)
         body = trimByteOrderMark(body)
         if err != nil {
-            return nil, err
+            return nil, &ActionsError{
+                ActivityID: resp.Header.Get(HeaderActionsActivityID),
+                StatusCode: resp.StatusCode,
+                Err:        err,
+            }
         }
-        return nil, &MessageQueueTokenExpiredError{msg: string(body)}
+        return nil, &MessageQueueTokenExpiredError{
+            activityID: resp.Header.Get(HeaderActionsActivityID),
+            statusCode: resp.StatusCode,
+            msg:        string(body),
+        }
     }

     var acquiredJobs *Int64List
     err = json.NewDecoder(resp.Body).Decode(&acquiredJobs)
     if err != nil {
-        return nil, err
+        return nil, &ActionsError{
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            StatusCode: resp.StatusCode,
+            Err:        err,
+        }
     }

     return acquiredJobs.Value, nil
@@ -721,7 +826,11 @@ func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*
     var acquirableJobList *AcquirableJobList
     err = json.NewDecoder(resp.Body).Decode(&acquirableJobList)
     if err != nil {
-        return nil, err
+        return nil, &ActionsError{
+            StatusCode: resp.StatusCode,
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            Err:        err,
+        }
     }

     return acquirableJobList, nil
@@ -750,9 +859,12 @@ func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *
     }
     var runnerJitConfig *RunnerScaleSetJitRunnerConfig
-    err = json.NewDecoder(resp.Body).Decode(&runnerJitConfig)
-    if err != nil {
-        return nil, err
+    if err := json.NewDecoder(resp.Body).Decode(&runnerJitConfig); err != nil {
+        return nil, &ActionsError{
+            StatusCode: resp.StatusCode,
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            Err:        err,
+        }
     }
     return runnerJitConfig, nil
 }
@@ -775,9 +887,12 @@ func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReferenc
     }
     var runnerReference *RunnerReference
-    err = json.NewDecoder(resp.Body).Decode(&runnerReference)
-    if err != nil {
-        return nil, err
+    if err := json.NewDecoder(resp.Body).Decode(&runnerReference); err != nil {
+        return nil, &ActionsError{
+            StatusCode: resp.StatusCode,
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            Err:        err,
+        }
     }

     return runnerReference, nil
@@ -801,9 +916,12 @@ func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*Runne
     }
     var runnerList *RunnerReferenceList
-    err = json.NewDecoder(resp.Body).Decode(&runnerList)
-    if err != nil {
-        return nil, err
+    if err := json.NewDecoder(resp.Body).Decode(&runnerList); err != nil {
+        return nil, &ActionsError{
+            StatusCode: resp.StatusCode,
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            Err:        err,
+        }
     }

     if runnerList.Count == 0 {
@@ -811,7 +929,11 @@ func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*Runne
     }

     if runnerList.Count > 1 {
-        return nil, fmt.Errorf("multiple runner found with name %s", runnerName)
+        return nil, &ActionsError{
+            StatusCode: resp.StatusCode,
+            ActivityID: resp.Header.Get(HeaderActionsActivityID),
+            Err:        fmt.Errorf("multiple runner found with name %s", runnerName),
+        }
     }

     return &runnerList.RunnerReferences[0], nil
@@ -884,12 +1006,20 @@ func (c *Client) getRunnerRegistrationToken(ctx context.Context) (*registrationT
         if err != nil {
             return nil, err
         }
-        return nil, fmt.Errorf("unexpected response from Actions service during registration token call: %v - %v", resp.StatusCode, string(body))
+        return nil, &GitHubAPIError{
+            StatusCode: resp.StatusCode,
+            RequestID:  resp.Header.Get(HeaderGitHubRequestID),
+            Err:        errors.New(string(body)),
+        }
     }

     var registrationToken *registrationToken
     if err := json.NewDecoder(resp.Body).Decode(&registrationToken); err != nil {
-        return nil, err
+        return nil, &GitHubAPIError{
+            StatusCode: resp.StatusCode,
+            RequestID:  resp.Header.Get(HeaderGitHubRequestID),
+            Err:        err,
+        }
     }

     return registrationToken, nil
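Aside (illustrative): GitHub REST failures (registration token, installation token) now surface as a `GitHubAPIError` that carries the value of the header named by the `HeaderGitHubRequestID` constant, which is the identifier GitHub support typically asks for. A hedged sketch of reading it back out; the helper name and import path are assumptions:

```go
package sketch // illustration only

import (
	"errors"
	"fmt"

	"github.com/actions/actions-runner-controller/github/actions" // assumed import path
)

// describeGitHubFailure pulls the request ID off a failed GitHub API call so it
// can be logged or quoted in a support ticket.
func describeGitHubFailure(err error) string {
	var ghErr *actions.GitHubAPIError
	if errors.As(err, &ghErr) {
		return fmt.Sprintf("github api failure: status=%d request-id=%s: %v", ghErr.StatusCode, ghErr.RequestID, ghErr.Err)
	}
	return err.Error()
}
```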
@@ -926,8 +1056,14 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
     // Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app
     var accessToken *accessToken
-    err = json.NewDecoder(resp.Body).Decode(&accessToken)
-    return accessToken, err
+    if err = json.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
+        return nil, &GitHubAPIError{
+            StatusCode: resp.StatusCode,
+            RequestID:  resp.Header.Get(HeaderGitHubRequestID),
+            Err:        err,
+        }
+    }
+
+    return accessToken, nil
 }

 type ActionsServiceAdminConnection struct {
@@ -964,25 +1100,55 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
     c.logger.Info("getting Actions tenant URL and JWT", "registrationURL", req.URL.String())

-    resp, err := c.Do(req)
-    if err != nil {
-        return nil, err
-    }
-    defer resp.Body.Close()
+    var resp *http.Response
+    retry := 0
+    for {
+        var err error
+        resp, err = c.Do(req)
+        if err != nil {
+            return nil, err
+        }
+        defer resp.Body.Close()

-    if resp.StatusCode < 200 || resp.StatusCode > 299 {
-        registrationErr := fmt.Errorf("unexpected response from Actions service during registration call: %v", resp.StatusCode)
+        if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
+            break
+        }

+        var innerErr error
         body, err := io.ReadAll(resp.Body)
         if err != nil {
-            return nil, fmt.Errorf("%v - %v", registrationErr, err)
+            innerErr = err
+        } else {
+            innerErr = errors.New(string(body))
         }
-        return nil, fmt.Errorf("%v - %v", registrationErr, string(body))
+
+        if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
+            return nil, &GitHubAPIError{
+                StatusCode: resp.StatusCode,
+                RequestID:  resp.Header.Get(HeaderGitHubRequestID),
+                Err:        innerErr,
+            }
+        }
+
+        retry++
+        if retry > 3 {
+            return nil, fmt.Errorf("unable to register runner after 3 retries: %w", &GitHubAPIError{
+                StatusCode: resp.StatusCode,
+                RequestID:  resp.Header.Get(HeaderGitHubRequestID),
+                Err:        innerErr,
+            })
+        }
+
+        time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1)))
     }

     var actionsServiceAdminConnection *ActionsServiceAdminConnection
     if err := json.NewDecoder(resp.Body).Decode(&actionsServiceAdminConnection); err != nil {
-        return nil, err
+        return nil, &GitHubAPIError{
+            StatusCode: resp.StatusCode,
+            RequestID:  resp.Header.Get(HeaderGitHubRequestID),
+            Err:        err,
+        }
     }

     return actionsServiceAdminConnection, nil
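Aside (not part of the diff): the registration call now retries only on 401/403 responses, and the sleep expression `500 * int(time.Millisecond) * (retry + 1)` works out to waits of 1s, 1.5s, and 2s between attempts; a fourth consecutive 401/403 aborts with the "unable to register runner after 3 retries" error. A tiny runnable check of that schedule:

```go
package main // runnable sketch of the delay arithmetic above

import (
	"fmt"
	"time"
)

func main() {
	for retry := 1; retry <= 3; retry++ {
		// Same expression as in the retry loop: 500ms scaled by (retry + 1).
		delay := time.Duration(500 * int(time.Millisecond) * (retry + 1))
		fmt.Printf("after failed attempt %d: sleep %v\n", retry, delay) // 1s, 1.5s, 2s
	}
}
```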

View File

@@ -5,6 +5,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"net/http" "net/http"
"strconv"
"testing" "testing"
"time" "time"
@@ -35,7 +36,7 @@ func TestGetMessage(t *testing.T) {
         client, err := actions.NewClient(s.configURLForOrg("my-org"), auth)
         require.NoError(t, err)

-        got, err := client.GetMessage(ctx, s.URL, token, 0)
+        got, err := client.GetMessage(ctx, s.URL, token, 0, 10)
         require.NoError(t, err)
         assert.Equal(t, want, got)
     })
@@ -52,7 +53,7 @@ func TestGetMessage(t *testing.T) {
         client, err := actions.NewClient(s.configURLForOrg("my-org"), auth)
         require.NoError(t, err)

-        got, err := client.GetMessage(ctx, s.URL, token, 1)
+        got, err := client.GetMessage(ctx, s.URL, token, 1, 10)
         require.NoError(t, err)
         assert.Equal(t, want, got)
     })
@@ -76,7 +77,7 @@ func TestGetMessage(t *testing.T) {
         )
         require.NoError(t, err)

-        _, err = client.GetMessage(ctx, server.URL, token, 0)
+        _, err = client.GetMessage(ctx, server.URL, token, 0, 10)
         assert.NotNil(t, err)
         assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry)
     })
@@ -89,7 +90,7 @@ func TestGetMessage(t *testing.T) {
         client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
         require.NoError(t, err)

-        _, err = client.GetMessage(ctx, server.URL, token, 0)
+        _, err = client.GetMessage(ctx, server.URL, token, 0, 10)
         require.NotNil(t, err)

         var expectedErr *actions.MessageQueueTokenExpiredError
@@ -98,7 +99,7 @@ func TestGetMessage(t *testing.T) {
t.Run("Status code not found", func(t *testing.T) { t.Run("Status code not found", func(t *testing.T) {
want := actions.ActionsError{ want := actions.ActionsError{
Message: "Request returned status: 404 Not Found", Err: errors.New("unknown exception"),
StatusCode: 404, StatusCode: 404,
} }
server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
@@ -108,7 +109,7 @@ func TestGetMessage(t *testing.T) {
         client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
         require.NoError(t, err)

-        _, err = client.GetMessage(ctx, server.URL, token, 0)
+        _, err = client.GetMessage(ctx, server.URL, token, 0, 10)
         require.NotNil(t, err)
         assert.Equal(t, want.Error(), err.Error())
     })
@@ -122,9 +123,35 @@ func TestGetMessage(t *testing.T) {
         client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
         require.NoError(t, err)

-        _, err = client.GetMessage(ctx, server.URL, token, 0)
+        _, err = client.GetMessage(ctx, server.URL, token, 0, 10)
         assert.NotNil(t, err)
     })
+
+    t.Run("Capacity error handling", func(t *testing.T) {
+        server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            hc := r.Header.Get(actions.HeaderScaleSetMaxCapacity)
+            c, err := strconv.Atoi(hc)
+            require.NoError(t, err)
+            assert.GreaterOrEqual(t, c, 0)
+            w.WriteHeader(http.StatusBadRequest)
+            w.Header().Set("Content-Type", "text/plain")
+        }))
+
+        client, err := actions.NewClient(server.configURLForOrg("my-org"), auth)
+        require.NoError(t, err)
+
+        _, err = client.GetMessage(ctx, server.URL, token, 0, -1)
+        require.Error(t, err)
+        // Ensure we don't send requests with negative capacity
+        assert.False(t, errors.Is(err, &actions.ActionsError{}))
+
+        _, err = client.GetMessage(ctx, server.URL, token, 0, 0)
+        assert.Error(t, err)
+        var expectedErr *actions.ActionsError
+        assert.ErrorAs(t, err, &expectedErr)
+        assert.Equal(t, http.StatusBadRequest, expectedErr.StatusCode)
+    })
 }

 func TestDeleteMessage(t *testing.T) {

View File

@@ -13,6 +13,8 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
const exampleRequestID = "5ddf2050-dae0-013c-9159-04421ad31b68"
func TestCreateMessageSession(t *testing.T) { func TestCreateMessageSession(t *testing.T) {
ctx := context.Background() ctx := context.Background()
auth := &actions.ActionsAuth{ auth := &actions.ActionsAuth{
@@ -69,13 +71,17 @@ func TestCreateMessageSession(t *testing.T) {
         }

         want := &actions.ActionsError{
-            ExceptionName: "CSharpExceptionNameHere",
-            Message:       "could not do something",
-            StatusCode:    http.StatusBadRequest,
+            ActivityID: exampleRequestID,
+            StatusCode: http.StatusBadRequest,
+            Err: &actions.ActionsExceptionError{
+                ExceptionName: "CSharpExceptionNameHere",
+                Message:       "could not do something",
+            },
         }

         server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
             w.Header().Set("Content-Type", "application/json")
+            w.Header().Set(actions.HeaderActionsActivityID, exampleRequestID)
             w.WriteHeader(http.StatusBadRequest)
             resp := []byte(`{"typeName": "CSharpExceptionNameHere","message": "could not do something"}`)
             w.Write(resp)
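Aside (inferred, not shown in this diff): the test above expects an `ActionsError` that wraps an `ActionsExceptionError` built from the service's `typeName`/`message` JSON. A plausible shape for those types, reconstructed only from the fields exercised here; the real definitions, JSON tags, and `Error()` strings live in the `actions` package and may differ:

```go
package sketch // inferred shape, for orientation only

import "fmt"

// ActionsExceptionError presumably mirrors the JSON the Actions service returns on failure.
type ActionsExceptionError struct {
	ExceptionName string `json:"typeName"` // assumed tag, based on the response body above
	Message       string `json:"message"`  // assumed tag
}

func (e *ActionsExceptionError) Error() string {
	return fmt.Sprintf("%s: %s", e.ExceptionName, e.Message)
}

// ActionsError presumably carries the HTTP status and Actions activity ID alongside the cause.
type ActionsError struct {
	ActivityID string
	StatusCode int
	Err        error
}

func (e *ActionsError) Error() string {
	return fmt.Sprintf("actions error: status %d, activity %q: %v", e.StatusCode, e.ActivityID, e.Err)
}

func (e *ActionsError) Unwrap() error { return e.Err }
```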

Some files were not shown because too many files have changed in this diff.