Mirror of https://github.com/actions/actions-runner-controller.git
Synced 2025-12-12 20:46:47 +00:00

Compare commits (7 commits):

0fce761686
c88ff44518
2fdf35ac9d
6cce3fefc5
eb2eaf8130
7bf712d0d4
7d024a6c05
.github/workflows/build-and-release-runners.yml (vendored, 25 changed lines)

@@ -16,9 +16,7 @@ on:
      - runner/dindrunner.Dockerfile
      - runner/entrypoint.sh
      - .github/workflows/build-runner.yml
  release:
    types: [published]
name: Runner

jobs:
  build:
    runs-on: ubuntu-latest

@@ -57,31 +55,12 @@ jobs:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

      # Considered unstable builds
      # Mutable (no sha) and immutable (include sha) tags are created, see Issue 285 and PR 286 for why
      - name: Build and push canary builds
      - name: Build and Push
        uses: docker/build-push-action@v2
        with:
          context: ./runner
          file: ./runner/${{ matrix.dockerfile }}
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' && github.event_name != 'release' }}
          build-args: |
            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
          tags: |
            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-canary
            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-canary-${{ steps.vars.outputs.sha_short }}

      # Considered stable builds
      # Mutable (no sha) and immutable (include sha) tags are created, see Issue 285 and PR 286 for why
      - name: Build and push release builds
        uses: docker/build-push-action@v2
        with:
          context: ./runner
          file: ./runner/${{ matrix.dockerfile }}
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name == 'release' }}
          build-args: |
            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
.github/workflows/release.yml (vendored, 1 changed line)

@@ -57,6 +57,7 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
.github/workflows/wip.yml (vendored, 6 changed lines)

@@ -30,11 +30,13 @@ jobs:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

      # Considered unstable builds
      # See Issue #285, PR #286, and PR #323 for more information
      - name: Build and Push
        uses: docker/build-push-action@v2
        with:
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest

          tags: |
            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
@@ -1,5 +1,7 @@
# actions-runner-controller

[](https://github.com/jonico/awesome-runners)

This controller operates self-hosted runners for GitHub Actions on your Kubernetes cluster.

ToC:

@@ -393,7 +395,7 @@ spec:
```

With the above example, the webhook server scales `myrunners` by `1` replica for 5 minutes on each `check_run` event
with the type of `created` and the status of `queued` received.
with the type of `created` and the status of `queued` received.

The primary benefit of autoscaling on Webhook compared to the standard autoscaling is that this one allows you to
immediately add "resource slack" for future GitHub Actions job runs.
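The paragraph above describes what amounts to a time-boxed capacity reservation: each matching `check_run` webhook event adds one replica of slack that expires after the configured duration. A minimal Go sketch of that idea, purely illustrative (the names here are hypothetical, not the controller's actual types):

package main

import (
    "fmt"
    "time"
)

// reservation models one webhook-triggered scale-up: +N replicas until expiresAt.
type reservation struct {
    replicas  int
    expiresAt time.Time
}

// desiredReplicas adds every still-live reservation on top of the base replica count.
func desiredReplicas(base int, reservations []reservation, now time.Time) int {
    desired := base
    for _, r := range reservations {
        if now.Before(r.expiresAt) {
            desired += r.replicas
        }
    }
    return desired
}

func main() {
    now := time.Now()
    // One check_run "created"/"queued" event, reserved for 5 minutes, arrived 2 minutes ago.
    live := []reservation{{replicas: 1, expiresAt: now.Add(3 * time.Minute)}}
    fmt.Println(desiredReplicas(1, live, now)) // prints 2 while the reservation is live
}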
@@ -529,14 +531,14 @@ spec:
      requests:
        cpu: "2.0"
        memory: "4Gi"

      # Timeout after a node crashed or became unreachable to evict your pods somewhere else (default 5mins)
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 10

      # If set to false, there are no privileged container and you cannot use docker.
      dockerEnabled: false
      # If set to true, runner pod container only 1 container that's expected to be able to run docker, too.
@@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.5.1

home: https://github.com/summerwind/actions-runner-controller
@@ -85,11 +85,11 @@ Create the name of the service account to use
{{- end }}

{{- define "actions-runner-controller.webhookServiceName" -}}
{{- include "actions-runner-controller.fullname" . }}-webhook
{{- include "actions-runner-controller.fullname" . | trunc 55 }}-webhook
{{- end }}

{{- define "actions-runner-controller.authProxyServiceName" -}}
{{- include "actions-runner-controller.fullname" . }}-metrics-service
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-metrics-service
{{- end }}

{{- define "actions-runner-controller.selfsignedIssuerName" -}}
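Context for the two truncations above: Kubernetes Service names must fit within 63 characters, and the limits appear to be chosen so that the truncated base plus the fixed suffix stays under that cap (55 + len("-webhook") = 63, 47 + len("-metrics-service") = 63). A small illustrative Go check of that arithmetic, not part of the chart:

package main

import "fmt"

// truncate mimics the Helm "trunc" pipe used in the templates above.
func truncate(s string, n int) string {
    if len(s) > n {
        return s[:n]
    }
    return s
}

func main() {
    base := "a-very-long-helm-release-name-actions-runner-controller-overflowing"
    for _, c := range []struct {
        limit  int
        suffix string
    }{
        {55, "-webhook"},
        {47, "-metrics-service"},
    } {
        name := truncate(base, c.limit) + c.suffix
        // Both results stay within the 63-character Service name limit.
        fmt.Printf("%s (%d chars, fits=%v)\n", name, len(name), len(name) <= 63)
    }
}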
@@ -40,14 +40,18 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {

    metav1Now := metav1.Now()
    testcases := []struct {
        repo string
        org string
        fixed *int
        max *int
        min *int
        sReplicas *int
        sTime *metav1.Time
        workflowRuns string
        repo string
        org string
        fixed *int
        max *int
        min *int
        sReplicas *int
        sTime *metav1.Time

        workflowRuns string
        workflowRuns_queued string
        workflowRuns_in_progress string

        workflowJobs map[int]string
        want int
        err string
@@ -55,87 +59,107 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
        // Legacy functionality
        // 3 demanded, max at 3
        {
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 3,
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
            want: 3,
        },
        // 2 demanded, max at 3, currently 3, delay scaling down due to grace period
        {
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            sReplicas: intPtr(3),
            sTime: &metav1Now,
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 3,
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            sReplicas: intPtr(3),
            sTime: &metav1Now,
            workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 3,
        },
        // 3 demanded, max at 2
        {
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(2),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 2,
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(2),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
            want: 2,
        },
        // 2 demanded, min at 2
        {
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 2,
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 2,
        },
        // 1 demanded, min at 2
        {
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            want: 2,
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
            want: 2,
        },
        // 1 demanded, min at 2
        {
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 2,
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 2,
        },
        // 1 demanded, min at 1
        {
            repo: "test/valid",
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            want: 1,
            repo: "test/valid",
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
            want: 1,
        },
        // 1 demanded, min at 1
        {
            repo: "test/valid",
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 1,
            repo: "test/valid",
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 1,
        },
        // fixed at 3
        {
            repo: "test/valid",
            min: intPtr(1),
            max: intPtr(3),
            fixed: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 3,
            repo: "test/valid",
            min: intPtr(1),
            max: intPtr(3),
            fixed: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}]}"`,
            want: 3,
        },

        // Job-level autoscaling
        // 5 requested from 3 workflows
        {
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(10),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
            repo: "test/valid",
            min: intPtr(2),
            max: intPtr(10),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}]}"`,
            workflowJobs: map[int]string{
                1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
                2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
@@ -158,7 +182,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {

        t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
            server := fake.NewServer(
                fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns),
                fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
                fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
                fake.WithListRunnersResponse(200, fake.RunnersListBody),
            )
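Reading across the fixtures above, the expected replica count matches counting queued plus in_progress runs and clamping the result to [min, max]; the grace-period case additionally keeps a recently scaled-up value, and the fixed cases pin the result. A rough Go sketch of the clamping rule as I read the fixtures, not the controller's actual implementation:

package main

import "fmt"

// suggestedReplicas clamps the demanded count (queued + in-progress runs) to [min, max].
func suggestedReplicas(queued, inProgress, min, max int) int {
    demanded := queued + inProgress
    if demanded < min {
        return min
    }
    if demanded > max {
        return max
    }
    return demanded
}

func main() {
    fmt.Println(suggestedReplicas(1, 2, 2, 3)) // 3, as in the "3 demanded, max at 3" case
    fmt.Println(suggestedReplicas(1, 2, 2, 2)) // 2, as in the "3 demanded, max at 2" case
    fmt.Println(suggestedReplicas(1, 0, 2, 3)) // 2, as in the "1 demanded, min at 2" case
}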
@@ -228,129 +252,157 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {

    metav1Now := metav1.Now()
    testcases := []struct {
        repos []string
        org string
        fixed *int
        max *int
        min *int
        sReplicas *int
        sTime *metav1.Time
        workflowRuns string
        repos []string
        org string
        fixed *int
        max *int
        min *int
        sReplicas *int
        sTime *metav1.Time

        workflowRuns string
        workflowRuns_queued string
        workflowRuns_in_progress string

        workflowJobs map[int]string
        want int
        err string
    }{
        // 3 demanded, max at 3
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 3,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
            want: 3,
        },
        // 2 demanded, max at 3, currently 3, delay scaling down due to grace period
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            sReplicas: intPtr(3),
            sTime: &metav1Now,
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 3,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            sReplicas: intPtr(3),
            sTime: &metav1Now,
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 3,
        },
        // 3 demanded, max at 2
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(2),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 2,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(2),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
            want: 2,
        },
        // 2 demanded, min at 2
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 2,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 2,
        },
        // 1 demanded, min at 2
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            want: 2,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
            want: 2,
        },
        // 1 demanded, min at 2
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 2,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 2,
        },
        // 1 demanded, min at 1
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            want: 1,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
            want: 1,
        },
        // 1 demanded, min at 1
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 1,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            want: 1,
        },
        // fixed at 3
        {
            org: "test",
            repos: []string{"valid"},
            fixed: intPtr(1),
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 3,
            org: "test",
            repos: []string{"valid"},
            fixed: intPtr(1),
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
            want: 3,
        },
        // org runner, fixed at 3
        {
            org: "test",
            repos: []string{"valid"},
            fixed: intPtr(1),
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            want: 3,
            org: "test",
            repos: []string{"valid"},
            fixed: intPtr(1),
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
            want: 3,
        },
        // org runner, 1 demanded, min at 1, no repos
        {
            org: "test",
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
            org: "test",
            min: intPtr(1),
            max: intPtr(3),
            workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
            workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
            err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
        },

        // Job-level autoscaling
        // 5 requested from 3 workflows
        {
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(10),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
            org: "test",
            repos: []string{"valid"},
            min: intPtr(2),
            max: intPtr(10),
            workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
            workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
            workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
            workflowJobs: map[int]string{
                1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
                2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
@@ -373,7 +425,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {

        t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
            server := fake.NewServer(
                fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns),
                fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
                fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
                fake.WithListRunnersResponse(200, fake.RunnersListBody),
            )
@@ -349,7 +349,8 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScaleUp(ctx contex
}

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr ctrl.Manager) error {
    autoscaler.Recorder = mgr.GetEventRecorderFor("webhookbasedautoscaler")
    name := "webhookbasedautoscaler"
    autoscaler.Recorder = mgr.GetEventRecorderFor(name)

    if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj runtime.Object) []string {
        hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)

@@ -371,5 +372,6 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr

    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.HorizontalRunnerAutoscaler{}).
        Named(name).
        Complete(autoscaler)
}
@@ -183,10 +183,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
}

func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager) error {
    r.Recorder = mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller")
    name := "horizontalrunnerautoscaler-controller"
    r.Recorder = mgr.GetEventRecorderFor(name)

    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.HorizontalRunnerAutoscaler{}).
        Named(name).
        Complete(r)
}
@@ -2,11 +2,14 @@ package controllers

import (
    "context"
    "fmt"
    "github.com/google/go-github/v33/github"
    github3 "github.com/google/go-github/v33/github"
    github2 "github.com/summerwind/actions-runner-controller/github"
    "k8s.io/apimachinery/pkg/runtime"
    "net/http"
    "net/http/httptest"
    "strings"
    "time"

    "github.com/summerwind/actions-runner-controller/github/fake"

@@ -31,8 +34,12 @@ type testEnvironment struct {
}

var (
    workflowRunsFor3Replicas = `{"total_count": 5, "workflow_runs":[{"status":"queued"}, {"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`
    workflowRunsFor1Replicas = `{"total_count": 6, "workflow_runs":[{"status":"queued"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}]}"`
    workflowRunsFor3Replicas = `{"total_count": 5, "workflow_runs":[{"status":"queued"}, {"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`
    workflowRunsFor3Replicas_queued = `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"queued"}]}"`
    workflowRunsFor3Replicas_in_progress = `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`
    workflowRunsFor1Replicas = `{"total_count": 6, "workflow_runs":[{"status":"queued"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}]}"`
    workflowRunsFor1Replicas_queued = `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`
    workflowRunsFor1Replicas_in_progress = `{"total_count": 0, "workflow_runs":[]}"`
)

var webhookServer *httptest.Server
@@ -56,6 +63,10 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
    responses.ListRepositoryWorkflowRuns = &fake.Handler{
        Status: 200,
        Body: workflowRunsFor3Replicas,
        Statuses: map[string]string{
            "queued": workflowRunsFor3Replicas_queued,
            "in_progress": workflowRunsFor3Replicas_in_progress,
        },
    }
    fakeRunnerList = fake.NewRunnersList()
    responses.ListRunners = fakeRunnerList.HandleList()
@@ -154,7 +165,7 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
    name := "example-runnerdeploy"

    {
        rs := &actionsv1alpha1.RunnerDeployment{
        rd := &actionsv1alpha1.RunnerDeployment{
            ObjectMeta: metav1.ObjectMeta{
                Name: name,
                Namespace: ns.Name,

@@ -174,80 +185,17 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
            },
        }

        err := k8sClient.Create(ctx, rs)

        Expect(err).NotTo(HaveOccurred(), "failed to create test RunnerDeployment resource")

        runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                return len(runnerSets.Items)
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                if len(runnerSets.Items) == 0 {
                    logf.Log.Info("No runnerreplicasets exist yet")
                    return -1
                }

                return *runnerSets.Items[0].Spec.Replicas
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
        ExpectCreate(ctx, rd, "test RunnerDeployment")
        ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
        ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
    }

    {
        // We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
        // made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
        // Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
        Eventually(func() error {
            var rd actionsv1alpha1.RunnerDeployment

            err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)

            Expect(err).NotTo(HaveOccurred(), "failed to get test RunnerDeployment resource")

        ExpectRunnerDeploymentEventuallyUpdates(ctx, ns.Name, name, func(rd *actionsv1alpha1.RunnerDeployment) {
            rd.Spec.Replicas = intPtr(2)

            return k8sClient.Update(ctx, &rd)
        },
        time.Second*1, time.Millisecond*500).Should(BeNil())

        runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                return len(runnerSets.Items)
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                return *runnerSets.Items[0].Spec.Replicas
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
        })
        ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
        ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Namespace, 2)
    }

    // Scale-up to 3 replicas
@@ -280,38 +228,10 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
            },
        }

        err := k8sClient.Create(ctx, hra)
        ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")

        Expect(err).NotTo(HaveOccurred(), "failed to create test HorizontalRunnerAutoscaler resource")

        runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                return len(runnerSets.Items)
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                if len(runnerSets.Items) == 0 {
                    logf.Log.Info("No runnerreplicasets exist yet")
                    return -1
                }

                return *runnerSets.Items[0].Spec.Replicas
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(3))
        ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
        ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
    }

    {
@@ -342,6 +262,8 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
        time.Sleep(time.Second)

        responses.ListRepositoryWorkflowRuns.Body = workflowRunsFor1Replicas
        responses.ListRepositoryWorkflowRuns.Statuses["queued"] = workflowRunsFor1Replicas_queued
        responses.ListRepositoryWorkflowRuns.Statuses["in_progress"] = workflowRunsFor1Replicas_in_progress

        var hra actionsv1alpha1.HorizontalRunnerAutoscaler
@@ -357,77 +279,107 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {

        Expect(err).NotTo(HaveOccurred(), "failed to get test HorizontalRunnerAutoscaler resource")

        Eventually(
            func() int {
                var runnerSets actionsv1alpha1.RunnerReplicaSetList

                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                if len(runnerSets.Items) == 0 {
                    logf.Log.Info("No runnerreplicasets exist yet")
                    return -1
                }

                return *runnerSets.Items[0].Spec.Replicas
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1), "runners after HRA force update for scale-down")
        ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1, "runners after HRA force update for scale-down")
    }

    // Scale-up to 2 replicas on first pull_request create webhook event
    {
        resp, err := sendWebhook(webhookServer, "pull_request", &github.PullRequestEvent{
            PullRequest: &github.PullRequest{
                Base: &github.PullRequestBranch{
                    Ref: github.String("main"),
                },
            },
            Repo: &github.Repository{
                Name: github.String("test/valid"),
                Organization: &github.Organization{
                    Name: github.String("test"),
                },
            },
            Action: github.String("created"),
        })

        Expect(err).NotTo(HaveOccurred(), "failed to send pull_request event")

        Expect(resp.StatusCode).To(Equal(200))
        SendPullRequestEvent("test/valid", "main", "created")
        ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
        ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
    }

    // Scale-up to 2 replicas
    // Scale-up to 3 replicas on second pull_request create webhook event
    {
        runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                return len(runnerSets.Items)
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1), "runner sets after webhook")

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
                if err != nil {
                    logf.Log.Error(err, "list runner sets")
                }

                if len(runnerSets.Items) == 0 {
                    logf.Log.Info("No runnerreplicasets exist yet")
                    return -1
                }

                return *runnerSets.Items[0].Spec.Replicas
            },
            time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2), "runners after webhook")
        SendPullRequestEvent("test/valid", "main", "created")
        ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
    }
    })
    })
})

func SendPullRequestEvent(repo string, branch string, action string) {
    org := strings.Split(repo, "/")[0]

    resp, err := sendWebhook(webhookServer, "pull_request", &github.PullRequestEvent{
        PullRequest: &github.PullRequest{
            Base: &github.PullRequestBranch{
                Ref: github.String(branch),
            },
        },
        Repo: &github.Repository{
            Name: github.String(repo),
            Organization: &github.Organization{
                Name: github.String(org),
            },
        },
        Action: github.String(action),
    })

    ExpectWithOffset(1, err).NotTo(HaveOccurred(), "failed to send pull_request event")

    ExpectWithOffset(1, resp.StatusCode).To(Equal(200))
}

func ExpectCreate(ctx context.Context, rd runtime.Object, s string) {
    err := k8sClient.Create(ctx, rd)

    ExpectWithOffset(1, err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create %s resource", s))
}

func ExpectRunnerDeploymentEventuallyUpdates(ctx context.Context, ns string, name string, f func(rd *actionsv1alpha1.RunnerDeployment)) {
    // We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
    // made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
    // Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
    EventuallyWithOffset(
        1,
        func() error {
            var rd actionsv1alpha1.RunnerDeployment

            err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, &rd)

            Expect(err).NotTo(HaveOccurred(), "failed to get test RunnerDeployment resource")

            f(&rd)

            return k8sClient.Update(ctx, &rd)
        },
        time.Second*1, time.Millisecond*500).Should(BeNil())
}

func ExpectRunnerSetsCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
    runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

    EventuallyWithOffset(
        1,
        func() int {
            err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns))
            if err != nil {
                logf.Log.Error(err, "list runner sets")
            }

            return len(runnerSets.Items)
        },
        time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
}

func ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
    runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

    EventuallyWithOffset(
        1,
        func() int {
            err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns))
            if err != nil {
                logf.Log.Error(err, "list runner sets")
            }

            if len(runnerSets.Items) == 0 {
                logf.Log.Info("No runnerreplicasets exist yet")
                return -1
            }

            return *runnerSets.Items[0].Spec.Replicas
        },
        time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
}
@@ -655,11 +655,14 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
}

func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
    r.Recorder = mgr.GetEventRecorderFor("runner-controller")
    name := "runner-controller"

    r.Recorder = mgr.GetEventRecorderFor(name)

    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.Runner{}).
        Owns(&corev1.Pod{}).
        Named(name).
        Complete(r)
}
@@ -285,7 +285,8 @@ func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeplo
}

func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
    r.Recorder = mgr.GetEventRecorderFor("runnerdeployment-controller")
    name := "runnerdeployment-controller"
    r.Recorder = mgr.GetEventRecorderFor(name)

    if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj runtime.Object) []string {
        runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)

@@ -306,5 +307,6 @@ func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.RunnerDeployment{}).
        Owns(&v1alpha1.RunnerReplicaSet{}).
        Named(name).
        Complete(r)
}
@@ -221,10 +221,12 @@ func (r *RunnerReplicaSetReconciler) newRunner(rs v1alpha1.RunnerReplicaSet) (v1
}

func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
    r.Recorder = mgr.GetEventRecorderFor("runnerreplicaset-controller")
    name := "runnerreplicaset-controller"
    r.Recorder = mgr.GetEventRecorderFor(name)

    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.RunnerReplicaSet{}).
        Owns(&v1alpha1.Runner{}).
        Named(name).
        Complete(r)
}
@@ -37,10 +37,21 @@ func (h *ListRunnersHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)
type Handler struct {
    Status int
    Body string

    Statuses map[string]string
}

func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    w.WriteHeader(h.Status)

    status := req.URL.Query().Get("status")
    if h.Statuses != nil {
        if body, ok := h.Statuses[status]; ok {
            fmt.Fprintf(w, body)
            return
        }
    }

    fmt.Fprintf(w, h.Body)
}
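The dispatch logic above serves a status-specific body when the request carries a matching ?status= query, and the generic Body otherwise. A self-contained approximation of that behaviour, exercised with httptest (a stand-in type for illustration, not the project's fake package):

package main

import (
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
)

// handler mirrors the idea of fake.Handler above: a fixed status code, a default body,
// and optional per-status bodies selected by the ?status= query parameter.
type handler struct {
    status   int
    body     string
    statuses map[string]string
}

func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    w.WriteHeader(h.status)
    if body, ok := h.statuses[req.URL.Query().Get("status")]; ok {
        io.WriteString(w, body)
        return
    }
    io.WriteString(w, h.body)
}

func main() {
    srv := httptest.NewServer(&handler{
        status:   200,
        body:     `{"total_count": 0, "workflow_runs":[]}`,
        statuses: map[string]string{"queued": `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}`},
    })
    defer srv.Close()

    resp, _ := http.Get(srv.URL + "?status=queued")
    defer resp.Body.Close()
    b, _ := io.ReadAll(resp.Body)
    fmt.Println(string(b)) // the "queued" body is served
}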
@@ -10,11 +10,15 @@ type FixedResponses struct {

type Option func(*ServerConfig)

func WithListRepositoryWorkflowRunsResponse(status int, body string) Option {
func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progress string) Option {
    return func(c *ServerConfig) {
        c.FixedResponses.ListRepositoryWorkflowRuns = &Handler{
            Status: status,
            Body: body,
            Statuses: map[string]string{
                "queued": queued,
                "in_progress": in_progress,
            },
        }
    }
}
@@ -210,12 +210,34 @@ func (c *Client) listRunners(ctx context.Context, enterprise, org, repo string,
func (c *Client) ListRepositoryWorkflowRuns(ctx context.Context, user string, repoName string) ([]*github.WorkflowRun, error) {
    c.Client.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, nil)

    queued, err := c.listRepositoryWorkflowRuns(ctx, user, repoName, "queued")
    if err != nil {
        return nil, fmt.Errorf("listing queued workflow runs: %w", err)
    }

    inProgress, err := c.listRepositoryWorkflowRuns(ctx, user, repoName, "in_progress")
    if err != nil {
        return nil, fmt.Errorf("listing in_progress workflow runs: %w", err)
    }

    var workflowRuns []*github.WorkflowRun

    workflowRuns = append(workflowRuns, queued...)
    workflowRuns = append(workflowRuns, inProgress...)

    return workflowRuns, nil
}

func (c *Client) listRepositoryWorkflowRuns(ctx context.Context, user string, repoName, status string) ([]*github.WorkflowRun, error) {
    c.Client.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, nil)

    var workflowRuns []*github.WorkflowRun

    opts := github.ListWorkflowRunsOptions{
        ListOptions: github.ListOptions{
            PerPage: 100,
        },
        Status: status,
    }

    for {
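The hunk above is cut off inside the paging loop. For reference, a loop like this is usually completed with the standard go-github NextPage idiom; the following is a sketch of that common pattern, not necessarily the project's exact code:

package ghutil

import (
    "context"

    "github.com/google/go-github/v33/github"
)

// listRunsByStatus pages through workflow runs filtered by status and collects them.
func listRunsByStatus(ctx context.Context, c *github.Client, owner, repo, status string) ([]*github.WorkflowRun, error) {
    opts := &github.ListWorkflowRunsOptions{
        Status:      status,
        ListOptions: github.ListOptions{PerPage: 100},
    }

    var runs []*github.WorkflowRun
    for {
        page, resp, err := c.Actions.ListRepositoryWorkflowRuns(ctx, owner, repo, opts)
        if err != nil {
            return nil, err
        }
        runs = append(runs, page.WorkflowRuns...)
        if resp.NextPage == 0 {
            return runs, nil
        }
        // Advance to the next page reported by the API response.
        opts.Page = resp.NextPage
    }
}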