mirror of
https://github.com/actions/actions-runner-controller.git
synced 2025-12-11 03:57:01 +00:00
Compare commits
11 Commits
update-run
...
nikola-jok
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f914f627f1 | ||
|
|
2572fbcb1a | ||
|
|
416c9942f1 | ||
|
|
a9b60e4565 | ||
|
|
82d4ab8936 | ||
|
|
20c8c49046 | ||
|
|
c4cee5a195 | ||
|
|
445ef94796 | ||
|
|
819a9264a0 | ||
|
|
a9eb5b6c45 | ||
|
|
b3d135408f |
@@ -193,7 +193,7 @@ runs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
|
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
|
||||||
kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
|
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
|
||||||
|
|
||||||
- name: Gather logs and cleanup
|
- name: Gather logs and cleanup
|
||||||
shell: bash
|
shell: bash
|
||||||
|
|||||||
97
.github/workflows/gha-e2e-tests.yaml
vendored
97
.github/workflows/gha-e2e-tests.yaml
vendored
@@ -16,7 +16,7 @@ env:
|
|||||||
TARGET_ORG: actions-runner-controller
|
TARGET_ORG: actions-runner-controller
|
||||||
TARGET_REPO: arc_e2e_test_dummy
|
TARGET_REPO: arc_e2e_test_dummy
|
||||||
IMAGE_NAME: "arc-test-image"
|
IMAGE_NAME: "arc-test-image"
|
||||||
IMAGE_VERSION: "0.8.2"
|
IMAGE_VERSION: "0.7.0"
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
# This will make sure we only apply the concurrency limits on pull requests
|
# This will make sure we only apply the concurrency limits on pull requests
|
||||||
@@ -880,98 +880,3 @@ jobs:
|
|||||||
helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
|
helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
|
||||||
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
|
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
|
||||||
kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
|
kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
|
||||||
|
|
||||||
init-with-min-runners:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
timeout-minutes: 20
|
|
||||||
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
|
|
||||||
env:
|
|
||||||
WORKFLOW_FILE: arc-test-workflow.yaml
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
ref: ${{ github.head_ref }}
|
|
||||||
|
|
||||||
- uses: ./.github/actions/setup-arc-e2e
|
|
||||||
id: setup
|
|
||||||
with:
|
|
||||||
app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
|
|
||||||
app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
|
|
||||||
image-name: ${{env.IMAGE_NAME}}
|
|
||||||
image-tag: ${{env.IMAGE_VERSION}}
|
|
||||||
target-org: ${{env.TARGET_ORG}}
|
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set-controller
|
|
||||||
id: install_arc_controller
|
|
||||||
run: |
|
|
||||||
helm install arc \
|
|
||||||
--namespace "arc-systems" \
|
|
||||||
--create-namespace \
|
|
||||||
--set image.repository=${{ env.IMAGE_NAME }} \
|
|
||||||
--set image.tag=${{ env.IMAGE_VERSION }} \
|
|
||||||
--set flags.updateStrategy="eventual" \
|
|
||||||
./charts/gha-runner-scale-set-controller \
|
|
||||||
--debug
|
|
||||||
count=0
|
|
||||||
while true; do
|
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
|
|
||||||
if [ -n "$POD_NAME" ]; then
|
|
||||||
echo "Pod found: $POD_NAME"
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
if [ "$count" -ge 60 ]; then
|
|
||||||
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
sleep 1
|
|
||||||
count=$((count+1))
|
|
||||||
done
|
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
|
|
||||||
kubectl get pod -n arc-systems
|
|
||||||
kubectl describe deployment arc-gha-rs-controller -n arc-systems
|
|
||||||
|
|
||||||
- name: Install gha-runner-scale-set
|
|
||||||
id: install_arc
|
|
||||||
run: |
|
|
||||||
ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
|
|
||||||
helm install "$ARC_NAME" \
|
|
||||||
--namespace "arc-runners" \
|
|
||||||
--create-namespace \
|
|
||||||
--set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
|
|
||||||
--set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
|
|
||||||
--set minRunners=5 \
|
|
||||||
./charts/gha-runner-scale-set \
|
|
||||||
--debug
|
|
||||||
echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
|
|
||||||
count=0
|
|
||||||
while true; do
|
|
||||||
POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
|
|
||||||
if [ -n "$POD_NAME" ]; then
|
|
||||||
echo "Pod found: $POD_NAME"
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
if [ "$count" -ge 60 ]; then
|
|
||||||
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
sleep 1
|
|
||||||
count=$((count+1))
|
|
||||||
done
|
|
||||||
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
|
|
||||||
kubectl get pod -n arc-systems
|
|
||||||
- name: Ensure 5 runners are up
|
|
||||||
run: |
|
|
||||||
count=0
|
|
||||||
while true; do
|
|
||||||
pod_count=$(kubectl get pods -n arc-runners --no-headers | wc -l)
|
|
||||||
if [[ "$pod_count" = 5 ]]; then
|
|
||||||
echo "5 pods are up!"
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
if [[ "$count" -ge 12 ]]; then
|
|
||||||
echo "Timeout waiting for 5 pods to be created"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
sleep 1
|
|
||||||
count=$((count+1))
|
|
||||||
done
|
|
||||||
|
|||||||
@@ -38,7 +38,6 @@ RUN --mount=target=. \
|
|||||||
export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
|
export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
|
||||||
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/manager main.go && \
|
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/manager main.go && \
|
||||||
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
|
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
|
||||||
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/ghalistener ./cmd/ghalistener && \
|
|
||||||
go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
|
go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
|
||||||
go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
|
go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
|
||||||
go build -trimpath -ldflags="-s -w" -o /out/sleep ./cmd/sleep
|
go build -trimpath -ldflags="-s -w" -o /out/sleep ./cmd/sleep
|
||||||
@@ -53,7 +52,6 @@ COPY --from=builder /out/manager .
|
|||||||
COPY --from=builder /out/github-webhook-server .
|
COPY --from=builder /out/github-webhook-server .
|
||||||
COPY --from=builder /out/actions-metrics-server .
|
COPY --from=builder /out/actions-metrics-server .
|
||||||
COPY --from=builder /out/github-runnerscaleset-listener .
|
COPY --from=builder /out/github-runnerscaleset-listener .
|
||||||
COPY --from=builder /out/ghalistener .
|
|
||||||
COPY --from=builder /out/sleep .
|
COPY --from=builder /out/sleep .
|
||||||
|
|
||||||
USER 65532:65532
|
USER 65532:65532
|
||||||
|
|||||||
6
Makefile
6
Makefile
@@ -6,7 +6,7 @@ endif
|
|||||||
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
|
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
|
||||||
VERSION ?= dev
|
VERSION ?= dev
|
||||||
COMMIT_SHA = $(shell git rev-parse HEAD)
|
COMMIT_SHA = $(shell git rev-parse HEAD)
|
||||||
RUNNER_VERSION ?= 2.312.0
|
RUNNER_VERSION ?= 2.311.0
|
||||||
TARGETPLATFORM ?= $(shell arch)
|
TARGETPLATFORM ?= $(shell arch)
|
||||||
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
||||||
RUNNER_TAG ?= ${VERSION}
|
RUNNER_TAG ?= ${VERSION}
|
||||||
@@ -300,6 +300,10 @@ acceptance/runner/startup:
|
|||||||
e2e:
|
e2e:
|
||||||
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
|
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
|
||||||
|
|
||||||
|
.PHONY: gha-e2e
|
||||||
|
gha-e2e:
|
||||||
|
bash hack/e2e-test.sh
|
||||||
|
|
||||||
# Upload release file to GitHub.
|
# Upload release file to GitHub.
|
||||||
github-release: release
|
github-release: release
|
||||||
ghr ${VERSION} release/
|
ghr ${VERSION} release/
|
||||||
|
|||||||
@@ -15,13 +15,13 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.8.2
|
version: 0.7.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||||
# It is recommended to use it with quotes.
|
# It is recommended to use it with quotes.
|
||||||
appVersion: "0.8.2"
|
appVersion: "0.7.0"
|
||||||
|
|
||||||
home: https://github.com/actions/actions-runner-controller
|
home: https://github.com/actions/actions-runner-controller
|
||||||
|
|
||||||
|
|||||||
@@ -15,13 +15,13 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.8.2
|
version: 0.7.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||||
# It is recommended to use it with quotes.
|
# It is recommended to use it with quotes.
|
||||||
appVersion: "0.8.2"
|
appVersion: "0.7.0"
|
||||||
|
|
||||||
home: https://github.com/actions/actions-runner-controller
|
home: https://github.com/actions/actions-runner-controller
|
||||||
|
|
||||||
|
|||||||
@@ -385,9 +385,6 @@ volumeMounts:
|
|||||||
{{- $setNodeExtraCaCerts = 1 }}
|
{{- $setNodeExtraCaCerts = 1 }}
|
||||||
{{- $setRunnerUpdateCaCerts = 1 }}
|
{{- $setRunnerUpdateCaCerts = 1 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- $mountGitHubServerTLS := 0 }}
|
|
||||||
{{- if or $container.env $setNodeExtraCaCerts $setRunnerUpdateCaCerts }}
|
|
||||||
env:
|
env:
|
||||||
{{- with $container.env }}
|
{{- with $container.env }}
|
||||||
{{- range $i, $env := . }}
|
{{- range $i, $env := . }}
|
||||||
@@ -408,12 +405,10 @@ volumeMounts:
|
|||||||
- name: RUNNER_UPDATE_CA_CERTS
|
- name: RUNNER_UPDATE_CA_CERTS
|
||||||
value: "1"
|
value: "1"
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- $mountGitHubServerTLS := 0 }}
|
||||||
{{- if $tlsConfig.runnerMountPath }}
|
{{- if $tlsConfig.runnerMountPath }}
|
||||||
{{- $mountGitHubServerTLS = 1 }}
|
{{- $mountGitHubServerTLS = 1 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{- if or $container.volumeMounts $mountGitHubServerTLS }}
|
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
{{- with $container.volumeMounts }}
|
{{- with $container.volumeMounts }}
|
||||||
{{- range $i, $volMount := . }}
|
{{- range $i, $volMount := . }}
|
||||||
@@ -428,7 +423,6 @@ volumeMounts:
|
|||||||
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||||
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
|
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end}}
|
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -2017,75 +2017,3 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t
|
|||||||
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
|
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunnerContainerEnvNotEmptyMap(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
// Path to the helm chart we will test
|
|
||||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
testValuesPath, err := filepath.Abs("../tests/values.yaml")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
releaseName := "test-runners"
|
|
||||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
|
||||||
|
|
||||||
options := &helm.Options{
|
|
||||||
Logger: logger.Discard,
|
|
||||||
ValuesFiles: []string{testValuesPath},
|
|
||||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
|
||||||
}
|
|
||||||
|
|
||||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
|
||||||
type testModel struct {
|
|
||||||
Spec struct {
|
|
||||||
Template struct {
|
|
||||||
Spec struct {
|
|
||||||
Containers []map[string]any `yaml:"containers"`
|
|
||||||
} `yaml:"spec"`
|
|
||||||
} `yaml:"template"`
|
|
||||||
} `yaml:"spec"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var m testModel
|
|
||||||
helm.UnmarshalK8SYaml(t, output, &m)
|
|
||||||
_, ok := m.Spec.Template.Spec.Containers[0]["env"]
|
|
||||||
assert.False(t, ok, "env should not be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunnerContainerVolumeNotEmptyMap(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
// Path to the helm chart we will test
|
|
||||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
testValuesPath, err := filepath.Abs("../tests/values.yaml")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
releaseName := "test-runners"
|
|
||||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
|
||||||
|
|
||||||
options := &helm.Options{
|
|
||||||
Logger: logger.Discard,
|
|
||||||
ValuesFiles: []string{testValuesPath},
|
|
||||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
|
||||||
}
|
|
||||||
|
|
||||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
|
||||||
type testModel struct {
|
|
||||||
Spec struct {
|
|
||||||
Template struct {
|
|
||||||
Spec struct {
|
|
||||||
Containers []map[string]any `yaml:"containers"`
|
|
||||||
} `yaml:"spec"`
|
|
||||||
} `yaml:"template"`
|
|
||||||
} `yaml:"spec"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var m testModel
|
|
||||||
helm.UnmarshalK8SYaml(t, output, &m)
|
|
||||||
_, ok := m.Spec.Template.Spec.Containers[0]["volumeMounts"]
|
|
||||||
assert.False(t, ok, "volumeMounts should not be set")
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -39,8 +39,7 @@ githubConfigSecret:
|
|||||||
## maxRunners is the max number of runners the autoscaling runner set will scale up to.
|
## maxRunners is the max number of runners the autoscaling runner set will scale up to.
|
||||||
# maxRunners: 5
|
# maxRunners: 5
|
||||||
|
|
||||||
## minRunners is the min number of idle runners. The target number of runners created will be
|
## minRunners is the min number of runners the autoscaling runner set will scale down to.
|
||||||
## calculated as a sum of minRunners and the number of jobs assigned to the scale set.
|
|
||||||
# minRunners: 0
|
# minRunners: 0
|
||||||
|
|
||||||
# runnerGroup: "default"
|
# runnerGroup: "default"
|
||||||
|
|||||||
@@ -1,133 +0,0 @@
|
|||||||
package app
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
|
||||||
|
|
||||||
// App is responsible for initializing required components and running the app.
|
|
||||||
type App struct {
|
|
||||||
// configured fields
|
|
||||||
config config.Config
|
|
||||||
logger logr.Logger
|
|
||||||
|
|
||||||
// initialized fields
|
|
||||||
listener Listener
|
|
||||||
worker Worker
|
|
||||||
metrics metrics.ServerPublisher
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:generate mockery --name Listener --output ./mocks --outpkg mocks --case underscore
|
|
||||||
type Listener interface {
|
|
||||||
Listen(ctx context.Context, handler listener.Handler) error
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:generate mockery --name Worker --output ./mocks --outpkg mocks --case underscore
|
|
||||||
type Worker interface {
|
|
||||||
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
|
|
||||||
HandleDesiredRunnerCount(ctx context.Context, count int) (int, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(config config.Config) (*App, error) {
|
|
||||||
app := &App{
|
|
||||||
config: config,
|
|
||||||
}
|
|
||||||
|
|
||||||
ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse GitHub config from URL: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
logger, err := config.Logger()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create logger: %w", err)
|
|
||||||
}
|
|
||||||
app.logger = logger.WithName("listener-app")
|
|
||||||
}
|
|
||||||
|
|
||||||
actionsClient, err := config.ActionsClient(app.logger)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create actions client: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.MetricsAddr != "" {
|
|
||||||
app.metrics = metrics.NewExporter(metrics.ExporterConfig{
|
|
||||||
ScaleSetName: config.EphemeralRunnerSetName,
|
|
||||||
ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
|
|
||||||
Enterprise: ghConfig.Enterprise,
|
|
||||||
Organization: ghConfig.Organization,
|
|
||||||
Repository: ghConfig.Repository,
|
|
||||||
ServerAddr: config.MetricsAddr,
|
|
||||||
ServerEndpoint: config.MetricsEndpoint,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
worker, err := worker.New(
|
|
||||||
worker.Config{
|
|
||||||
EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
|
|
||||||
EphemeralRunnerSetName: config.EphemeralRunnerSetName,
|
|
||||||
MaxRunners: config.MaxRunners,
|
|
||||||
MinRunners: config.MinRunners,
|
|
||||||
},
|
|
||||||
worker.WithLogger(app.logger.WithName("worker")),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create new kubernetes worker: %w", err)
|
|
||||||
}
|
|
||||||
app.worker = worker
|
|
||||||
|
|
||||||
listener, err := listener.New(listener.Config{
|
|
||||||
Client: actionsClient,
|
|
||||||
ScaleSetID: app.config.RunnerScaleSetId,
|
|
||||||
MinRunners: app.config.MinRunners,
|
|
||||||
MaxRunners: app.config.MaxRunners,
|
|
||||||
Logger: app.logger.WithName("listener"),
|
|
||||||
Metrics: app.metrics,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create new listener: %w", err)
|
|
||||||
}
|
|
||||||
app.listener = listener
|
|
||||||
|
|
||||||
app.logger.Info("app initialized")
|
|
||||||
|
|
||||||
return app, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *App) Run(ctx context.Context) error {
|
|
||||||
var errs []error
|
|
||||||
if app.worker == nil {
|
|
||||||
errs = append(errs, fmt.Errorf("worker not initialized"))
|
|
||||||
}
|
|
||||||
if app.listener == nil {
|
|
||||||
errs = append(errs, fmt.Errorf("listener not initialized"))
|
|
||||||
}
|
|
||||||
if err := errors.Join(errs...); err != nil {
|
|
||||||
return fmt.Errorf("app not initialized: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
g, ctx := errgroup.WithContext(ctx)
|
|
||||||
g.Go(func() error {
|
|
||||||
app.logger.Info("Starting listener")
|
|
||||||
return app.listener.Listen(ctx, app.worker)
|
|
||||||
})
|
|
||||||
|
|
||||||
if app.metrics != nil {
|
|
||||||
g.Go(func() error {
|
|
||||||
app.logger.Info("Starting metrics server")
|
|
||||||
return app.metrics.ListenAndServe(ctx)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return g.Wait()
|
|
||||||
}
|
|
||||||
@@ -1,85 +0,0 @@
|
|||||||
package app
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
appmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/app/mocks"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
|
|
||||||
metricsMocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/mock"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestApp_Run(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
t.Run("ListenerWorkerGuard", func(t *testing.T) {
|
|
||||||
invalidApps := []*App{
|
|
||||||
{},
|
|
||||||
{worker: &worker.Worker{}},
|
|
||||||
{listener: &listener.Listener{}},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, app := range invalidApps {
|
|
||||||
assert.Error(t, app.Run(context.Background()))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("ExitsOnListenerError", func(t *testing.T) {
|
|
||||||
listener := appmocks.NewListener(t)
|
|
||||||
worker := appmocks.NewWorker(t)
|
|
||||||
|
|
||||||
listener.On("Listen", mock.Anything, mock.Anything).Return(errors.New("listener error")).Once()
|
|
||||||
|
|
||||||
app := &App{
|
|
||||||
listener: listener,
|
|
||||||
worker: worker,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := app.Run(context.Background())
|
|
||||||
assert.Error(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("ExitsOnListenerNil", func(t *testing.T) {
|
|
||||||
listener := appmocks.NewListener(t)
|
|
||||||
worker := appmocks.NewWorker(t)
|
|
||||||
|
|
||||||
listener.On("Listen", mock.Anything, mock.Anything).Return(nil).Once()
|
|
||||||
|
|
||||||
app := &App{
|
|
||||||
listener: listener,
|
|
||||||
worker: worker,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := app.Run(context.Background())
|
|
||||||
assert.NoError(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CancelListenerOnMetricsServerError", func(t *testing.T) {
|
|
||||||
listener := appmocks.NewListener(t)
|
|
||||||
worker := appmocks.NewWorker(t)
|
|
||||||
metrics := metricsMocks.NewServerPublisher(t)
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
listener.On("Listen", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
|
|
||||||
ctx := args.Get(0).(context.Context)
|
|
||||||
go func() {
|
|
||||||
<-ctx.Done()
|
|
||||||
}()
|
|
||||||
}).Return(nil).Once()
|
|
||||||
|
|
||||||
metrics.On("ListenAndServe", mock.Anything).Return(errors.New("metrics server error")).Once()
|
|
||||||
|
|
||||||
app := &App{
|
|
||||||
listener: listener,
|
|
||||||
worker: worker,
|
|
||||||
metrics: metrics,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := app.Run(ctx)
|
|
||||||
assert.Error(t, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,43 +0,0 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
|
||||||
|
|
||||||
package mocks
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
|
|
||||||
listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Listener is an autogenerated mock type for the Listener type
|
|
||||||
type Listener struct {
|
|
||||||
mock.Mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// Listen provides a mock function with given fields: ctx, handler
|
|
||||||
func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
|
|
||||||
ret := _m.Called(ctx, handler)
|
|
||||||
|
|
||||||
var r0 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, listener.Handler) error); ok {
|
|
||||||
r0 = rf(ctx, handler)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Error(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewListener creates a new instance of Listener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
|
||||||
// The first argument is typically a *testing.T value.
|
|
||||||
func NewListener(t interface {
|
|
||||||
mock.TestingT
|
|
||||||
Cleanup(func())
|
|
||||||
}) *Listener {
|
|
||||||
mock := &Listener{}
|
|
||||||
mock.Mock.Test(t)
|
|
||||||
|
|
||||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
|
||||||
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
|
||||||
|
|
||||||
package mocks
|
|
||||||
|
|
||||||
import (
|
|
||||||
actions "github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
|
|
||||||
context "context"
|
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Worker is an autogenerated mock type for the Worker type
|
|
||||||
type Worker struct {
|
|
||||||
mock.Mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count
|
|
||||||
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
|
|
||||||
ret := _m.Called(ctx, count)
|
|
||||||
|
|
||||||
var r0 int
|
|
||||||
var r1 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int) (int, error)); ok {
|
|
||||||
return rf(ctx, count)
|
|
||||||
}
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int) int); ok {
|
|
||||||
r0 = rf(ctx, count)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Get(0).(int)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
|
|
||||||
r1 = rf(ctx, count)
|
|
||||||
} else {
|
|
||||||
r1 = ret.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
|
|
||||||
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
|
|
||||||
ret := _m.Called(ctx, jobInfo)
|
|
||||||
|
|
||||||
var r0 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
|
|
||||||
r0 = rf(ctx, jobInfo)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Error(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWorker creates a new instance of Worker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
|
||||||
// The first argument is typically a *testing.T value.
|
|
||||||
func NewWorker(t interface {
|
|
||||||
mock.TestingT
|
|
||||||
Cleanup(func())
|
|
||||||
}) *Worker {
|
|
||||||
mock := &Worker{}
|
|
||||||
mock.Mock.Test(t)
|
|
||||||
|
|
||||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
|
||||||
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
@@ -1,161 +0,0 @@
|
|||||||
package config
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/x509"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/build"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/actions/actions-runner-controller/logging"
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"golang.org/x/net/http/httpproxy"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
ConfigureUrl string `json:"configureUrl"`
|
|
||||||
AppID int64 `json:"appID"`
|
|
||||||
AppInstallationID int64 `json:"appInstallationID"`
|
|
||||||
AppPrivateKey string `json:"appPrivateKey"`
|
|
||||||
Token string `json:"token"`
|
|
||||||
EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
|
|
||||||
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName"`
|
|
||||||
MaxRunners int `json:"maxRunners"`
|
|
||||||
MinRunners int `json:"minRunners"`
|
|
||||||
RunnerScaleSetId int `json:"runnerScaleSetId"`
|
|
||||||
RunnerScaleSetName string `json:"runnerScaleSetName"`
|
|
||||||
ServerRootCA string `json:"serverRootCA"`
|
|
||||||
LogLevel string `json:"logLevel"`
|
|
||||||
LogFormat string `json:"logFormat"`
|
|
||||||
MetricsAddr string `json:"metricsAddr"`
|
|
||||||
MetricsEndpoint string `json:"metricsEndpoint"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func Read(path string) (Config, error) {
|
|
||||||
f, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return Config{}, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
var config Config
|
|
||||||
if err := json.NewDecoder(f).Decode(&config); err != nil {
|
|
||||||
return Config{}, fmt.Errorf("failed to decode config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := config.validate(); err != nil {
|
|
||||||
return Config{}, fmt.Errorf("failed to validate config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return config, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) validate() error {
|
|
||||||
if len(c.ConfigureUrl) == 0 {
|
|
||||||
return fmt.Errorf("GitHubConfigUrl is not provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
|
|
||||||
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.RunnerScaleSetId == 0 {
|
|
||||||
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.MaxRunners < c.MinRunners {
|
|
||||||
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
|
|
||||||
}
|
|
||||||
|
|
||||||
hasToken := len(c.Token) > 0
|
|
||||||
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""
|
|
||||||
|
|
||||||
if !hasToken && !hasPrivateKeyConfig {
|
|
||||||
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasToken && hasPrivateKeyConfig {
|
|
||||||
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Logger() (logr.Logger, error) {
|
|
||||||
logLevel := string(logging.LogLevelDebug)
|
|
||||||
if c.LogLevel != "" {
|
|
||||||
logLevel = c.LogLevel
|
|
||||||
}
|
|
||||||
|
|
||||||
logFormat := string(logging.LogFormatText)
|
|
||||||
if c.LogFormat != "" {
|
|
||||||
logFormat = c.LogFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
logger, err := logging.NewLogger(logLevel, logFormat)
|
|
||||||
if err != nil {
|
|
||||||
return logr.Logger{}, fmt.Errorf("NewLogger failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return logger, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.ClientOption) (*actions.Client, error) {
|
|
||||||
var creds actions.ActionsAuth
|
|
||||||
switch c.Token {
|
|
||||||
case "":
|
|
||||||
creds.AppCreds = &actions.GitHubAppAuth{
|
|
||||||
AppID: c.AppID,
|
|
||||||
AppInstallationID: c.AppInstallationID,
|
|
||||||
AppPrivateKey: c.AppPrivateKey,
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
creds.Token = c.Token
|
|
||||||
}
|
|
||||||
|
|
||||||
options := append([]actions.ClientOption{
|
|
||||||
actions.WithLogger(logger),
|
|
||||||
}, clientOptions...)
|
|
||||||
|
|
||||||
if c.ServerRootCA != "" {
|
|
||||||
systemPool, err := x509.SystemCertPool()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to load system cert pool: %w", err)
|
|
||||||
}
|
|
||||||
pool := systemPool.Clone()
|
|
||||||
ok := pool.AppendCertsFromPEM([]byte(c.ServerRootCA))
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("failed to parse root certificate")
|
|
||||||
}
|
|
||||||
|
|
||||||
options = append(options, actions.WithRootCAs(pool))
|
|
||||||
}
|
|
||||||
|
|
||||||
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
|
|
||||||
options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) {
|
|
||||||
return proxyFunc(req.URL)
|
|
||||||
}))
|
|
||||||
|
|
||||||
client, err := actions.NewClient(c.ConfigureUrl, &creds, options...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create actions client: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
client.SetUserAgent(actions.UserAgentInfo{
|
|
||||||
Version: build.Version,
|
|
||||||
CommitSHA: build.CommitSHA,
|
|
||||||
ScaleSetID: c.RunnerScaleSetId,
|
|
||||||
HasProxy: hasProxy(),
|
|
||||||
Subsystem: "ghalistener",
|
|
||||||
})
|
|
||||||
|
|
||||||
return client, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasProxy() bool {
|
|
||||||
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
|
|
||||||
return proxyFunc != nil
|
|
||||||
}
|
|
||||||
@@ -1,161 +0,0 @@
|
|||||||
package config_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions/testserver"
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCustomerServerRootCA(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
certsFolder := filepath.Join(
|
|
||||||
"../../../",
|
|
||||||
"github",
|
|
||||||
"actions",
|
|
||||||
"testdata",
|
|
||||||
)
|
|
||||||
certPath := filepath.Join(certsFolder, "server.crt")
|
|
||||||
keyPath := filepath.Join(certsFolder, "server.key")
|
|
||||||
|
|
||||||
serverCalledSuccessfully := false
|
|
||||||
|
|
||||||
server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
|
||||||
serverCalledSuccessfully = true
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
w.Write([]byte(`{"count": 0}`))
|
|
||||||
}))
|
|
||||||
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
|
|
||||||
server.StartTLS()
|
|
||||||
|
|
||||||
var certsString string
|
|
||||||
rootCA, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
certsString = string(rootCA)
|
|
||||||
|
|
||||||
intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.pem"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
certsString = certsString + string(intermediate)
|
|
||||||
|
|
||||||
config := config.Config{
|
|
||||||
ConfigureUrl: server.ConfigURLForOrg("myorg"),
|
|
||||||
ServerRootCA: certsString,
|
|
||||||
Token: "token",
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := config.ActionsClient(logr.Discard())
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = client.GetRunnerScaleSet(ctx, 1, "test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, serverCalledSuccessfully)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProxySettings(t *testing.T) {
|
|
||||||
t.Run("http", func(t *testing.T) {
|
|
||||||
wentThroughProxy := false
|
|
||||||
|
|
||||||
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
|
|
||||||
wentThroughProxy = true
|
|
||||||
}))
|
|
||||||
t.Cleanup(func() {
|
|
||||||
proxy.Close()
|
|
||||||
})
|
|
||||||
|
|
||||||
prevProxy := os.Getenv("http_proxy")
|
|
||||||
os.Setenv("http_proxy", proxy.URL)
|
|
||||||
defer os.Setenv("http_proxy", prevProxy)
|
|
||||||
|
|
||||||
config := config.Config{
|
|
||||||
ConfigureUrl: "https://github.com/org/repo",
|
|
||||||
Token: "token",
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := config.ActionsClient(logr.Discard())
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = client.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.True(t, wentThroughProxy)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("https", func(t *testing.T) {
|
|
||||||
wentThroughProxy := false
|
|
||||||
|
|
||||||
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
|
|
||||||
wentThroughProxy = true
|
|
||||||
}))
|
|
||||||
t.Cleanup(func() {
|
|
||||||
proxy.Close()
|
|
||||||
})
|
|
||||||
|
|
||||||
prevProxy := os.Getenv("https_proxy")
|
|
||||||
os.Setenv("https_proxy", proxy.URL)
|
|
||||||
defer os.Setenv("https_proxy", prevProxy)
|
|
||||||
|
|
||||||
config := config.Config{
|
|
||||||
ConfigureUrl: "https://github.com/org/repo",
|
|
||||||
Token: "token",
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = client.Do(req)
|
|
||||||
// proxy doesn't support https
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.True(t, wentThroughProxy)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("no_proxy", func(t *testing.T) {
|
|
||||||
wentThroughProxy := false
|
|
||||||
|
|
||||||
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
|
|
||||||
wentThroughProxy = true
|
|
||||||
}))
|
|
||||||
t.Cleanup(func() {
|
|
||||||
proxy.Close()
|
|
||||||
})
|
|
||||||
|
|
||||||
prevProxy := os.Getenv("http_proxy")
|
|
||||||
os.Setenv("http_proxy", proxy.URL)
|
|
||||||
defer os.Setenv("http_proxy", prevProxy)
|
|
||||||
|
|
||||||
prevNoProxy := os.Getenv("no_proxy")
|
|
||||||
os.Setenv("no_proxy", "example.com")
|
|
||||||
defer os.Setenv("no_proxy", prevNoProxy)
|
|
||||||
|
|
||||||
config := config.Config{
|
|
||||||
ConfigureUrl: "https://github.com/org/repo",
|
|
||||||
Token: "token",
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := config.ActionsClient(logr.Discard())
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = client.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, wentThroughProxy)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,92 +0,0 @@
|
|||||||
package config
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfigValidationMinMax(t *testing.T) {
|
|
||||||
config := &Config{
|
|
||||||
ConfigureUrl: "github.com/some_org/some_repo",
|
|
||||||
EphemeralRunnerSetNamespace: "namespace",
|
|
||||||
EphemeralRunnerSetName: "deployment",
|
|
||||||
RunnerScaleSetId: 1,
|
|
||||||
MinRunners: 5,
|
|
||||||
MaxRunners: 2,
|
|
||||||
Token: "token",
|
|
||||||
}
|
|
||||||
err := config.validate()
|
|
||||||
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidationMissingToken(t *testing.T) {
|
|
||||||
config := &Config{
|
|
||||||
ConfigureUrl: "github.com/some_org/some_repo",
|
|
||||||
EphemeralRunnerSetNamespace: "namespace",
|
|
||||||
EphemeralRunnerSetName: "deployment",
|
|
||||||
RunnerScaleSetId: 1,
|
|
||||||
}
|
|
||||||
err := config.validate()
|
|
||||||
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
|
|
||||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidationAppKey(t *testing.T) {
|
|
||||||
config := &Config{
|
|
||||||
AppID: 1,
|
|
||||||
AppInstallationID: 10,
|
|
||||||
ConfigureUrl: "github.com/some_org/some_repo",
|
|
||||||
EphemeralRunnerSetNamespace: "namespace",
|
|
||||||
EphemeralRunnerSetName: "deployment",
|
|
||||||
RunnerScaleSetId: 1,
|
|
||||||
}
|
|
||||||
err := config.validate()
|
|
||||||
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
|
|
||||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
|
|
||||||
config := &Config{
|
|
||||||
AppID: 1,
|
|
||||||
AppInstallationID: 10,
|
|
||||||
AppPrivateKey: "asdf",
|
|
||||||
Token: "asdf",
|
|
||||||
ConfigureUrl: "github.com/some_org/some_repo",
|
|
||||||
EphemeralRunnerSetNamespace: "namespace",
|
|
||||||
EphemeralRunnerSetName: "deployment",
|
|
||||||
RunnerScaleSetId: 1,
|
|
||||||
}
|
|
||||||
err := config.validate()
|
|
||||||
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
|
|
||||||
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidation(t *testing.T) {
|
|
||||||
config := &Config{
|
|
||||||
ConfigureUrl: "https://github.com/actions",
|
|
||||||
EphemeralRunnerSetNamespace: "namespace",
|
|
||||||
EphemeralRunnerSetName: "deployment",
|
|
||||||
RunnerScaleSetId: 1,
|
|
||||||
MinRunners: 1,
|
|
||||||
MaxRunners: 5,
|
|
||||||
Token: "asdf",
|
|
||||||
}
|
|
||||||
|
|
||||||
err := config.validate()
|
|
||||||
|
|
||||||
assert.NoError(t, err, "Expected no error")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidationConfigUrl(t *testing.T) {
|
|
||||||
config := &Config{
|
|
||||||
EphemeralRunnerSetNamespace: "namespace",
|
|
||||||
EphemeralRunnerSetName: "deployment",
|
|
||||||
RunnerScaleSetId: 1,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := config.validate()
|
|
||||||
|
|
||||||
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
|
|
||||||
}
|
|
||||||
@@ -1,431 +0,0 @@
|
|||||||
package listener
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// sessionCreationMaxRetries bounds how many times createSession retries
	// a conflicting (HTTP 409) session before giving up.
	sessionCreationMaxRetries = 10
)
|
|
||||||
|
|
||||||
// message types found inside a "RunnerScaleSetJobMessages" batch body
// (see parseMessage).
const (
	messageTypeJobAvailable = "JobAvailable"
	messageTypeJobAssigned  = "JobAssigned"
	messageTypeJobStarted   = "JobStarted"
	messageTypeJobCompleted = "JobCompleted"
)
|
|
||||||
|
|
||||||
//go:generate mockery --name Client --output ./mocks --outpkg mocks --case underscore

// Client is the surface of the actions service API the listener depends on:
// message session lifecycle, queue polling/acknowledgement, and job
// acquisition.
type Client interface {
	GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error)
	CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error)
	GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error)
	DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
	AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
	RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error)
	DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error
}
|
|
||||||
|
|
||||||
// Config collects the dependencies and scaling bounds for a Listener.
type Config struct {
	Client     Client            // actions service client (required)
	ScaleSetID int               // runner scale set to listen on (required, non-zero)
	MinRunners int               // lower scaling bound; must be >= 0
	MaxRunners int               // upper scaling bound; 0 skips the min<=max check (presumably "unbounded" — see Validate)
	Logger     logr.Logger
	Metrics    metrics.Publisher // optional; New falls back to metrics.Discard
}
|
|
||||||
|
|
||||||
func (c *Config) Validate() error {
|
|
||||||
if c.Client == nil {
|
|
||||||
return errors.New("client is required")
|
|
||||||
}
|
|
||||||
if c.ScaleSetID == 0 {
|
|
||||||
return errors.New("scaleSetID is required")
|
|
||||||
}
|
|
||||||
if c.MinRunners < 0 {
|
|
||||||
return errors.New("minRunners must be greater than or equal to 0")
|
|
||||||
}
|
|
||||||
if c.MaxRunners < 0 {
|
|
||||||
return errors.New("maxRunners must be greater than or equal to 0")
|
|
||||||
}
|
|
||||||
if c.MaxRunners > 0 && c.MinRunners > c.MaxRunners {
|
|
||||||
return errors.New("minRunners must be less than or equal to maxRunners")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Listener's role is to manage all interactions with the actions service.
// It receives messages and processes them using the given handler.
type Listener struct {
	// configured fields
	scaleSetID int               // The ID of the scale set associated with the listener.
	client     Client            // The client used to interact with the scale set.
	metrics    metrics.Publisher // The publisher used to publish metrics.

	// internal fields
	logger   logr.Logger // The logger used for logging.
	hostname string      // Session owner name; os.Hostname or a UUID fallback (see New).

	// updated fields
	lastMessageID int64                          // The ID of the last processed message.
	session       *actions.RunnerScaleSetSession // The session for managing the runner scale set.
}
|
|
||||||
|
|
||||||
func New(config Config) (*Listener, error) {
|
|
||||||
if err := config.Validate(); err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
listener := &Listener{
|
|
||||||
scaleSetID: config.ScaleSetID,
|
|
||||||
client: config.Client,
|
|
||||||
logger: config.Logger,
|
|
||||||
metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.Metrics != nil {
|
|
||||||
listener.metrics = config.Metrics
|
|
||||||
}
|
|
||||||
|
|
||||||
listener.metrics.PublishStatic(config.MinRunners, config.MaxRunners)
|
|
||||||
|
|
||||||
hostname, err := os.Hostname()
|
|
||||||
if err != nil {
|
|
||||||
hostname = uuid.NewString()
|
|
||||||
listener.logger.Info("Failed to get hostname, fallback to uuid", "uuid", hostname, "error", err)
|
|
||||||
}
|
|
||||||
listener.hostname = hostname
|
|
||||||
|
|
||||||
return listener, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:generate mockery --name Handler --output ./mocks --outpkg mocks --case underscore

// Handler reacts to job-started events and to desired-runner-count updates
// derived from scale set statistics. HandleDesiredRunnerCount returns the
// count it actually applied, which the listener publishes as a metric.
type Handler interface {
	HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
	HandleDesiredRunnerCount(ctx context.Context, count int) (int, error)
}
|
|
||||||
|
|
||||||
// Listen listens for incoming messages and handles them using the provided handler.
|
|
||||||
// It continuously listens for messages until the context is cancelled.
|
|
||||||
// The initial message contains the current statistics and acquirable jobs, if any.
|
|
||||||
// The handler is responsible for handling the initial message and subsequent messages.
|
|
||||||
// If an error occurs during any step, Listen returns an error.
|
|
||||||
func (l *Listener) Listen(ctx context.Context, handler Handler) error {
|
|
||||||
if err := l.createSession(ctx); err != nil {
|
|
||||||
return fmt.Errorf("createSession failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err := l.deleteMessageSession(); err != nil {
|
|
||||||
l.logger.Error(err, "failed to delete message session")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
initialMessage := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 0,
|
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
|
||||||
Statistics: l.session.Statistics,
|
|
||||||
Body: "",
|
|
||||||
}
|
|
||||||
|
|
||||||
if l.session.Statistics == nil {
|
|
||||||
return fmt.Errorf("session statistics is nil")
|
|
||||||
}
|
|
||||||
l.metrics.PublishStatistics(initialMessage.Statistics)
|
|
||||||
|
|
||||||
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("handling initial message failed: %w", err)
|
|
||||||
}
|
|
||||||
l.metrics.PublishDesiredRunners(desiredRunners)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
msg, err := l.getMessage(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to get message: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if msg == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// New context is created to avoid cancelation during message handling.
|
|
||||||
if err := l.handleMessage(context.Background(), handler, msg); err != nil {
|
|
||||||
return fmt.Errorf("failed to handle message: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
|
|
||||||
parsedMsg, err := l.parseMessage(ctx, msg)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to parse message: %w", err)
|
|
||||||
}
|
|
||||||
l.metrics.PublishStatistics(parsedMsg.statistics)
|
|
||||||
|
|
||||||
if len(parsedMsg.jobsAvailable) > 0 {
|
|
||||||
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to acquire jobs: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Jobs are acquired", "count", len(acquiredJobIDs), "requestIds", fmt.Sprint(acquiredJobIDs))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, jobCompleted := range parsedMsg.jobsCompleted {
|
|
||||||
l.metrics.PublishJobCompleted(jobCompleted)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.lastMessageID = msg.MessageId
|
|
||||||
|
|
||||||
if err := l.deleteLastMessage(ctx); err != nil {
|
|
||||||
return fmt.Errorf("failed to delete message: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, jobStarted := range parsedMsg.jobsStarted {
|
|
||||||
if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
|
|
||||||
return fmt.Errorf("failed to handle job started: %w", err)
|
|
||||||
}
|
|
||||||
l.metrics.PublishJobStarted(jobStarted)
|
|
||||||
}
|
|
||||||
|
|
||||||
desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to handle desired runner count: %w", err)
|
|
||||||
}
|
|
||||||
l.metrics.PublishDesiredRunners(desiredRunners)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Listener) createSession(ctx context.Context) error {
|
|
||||||
var session *actions.RunnerScaleSetSession
|
|
||||||
var retries int
|
|
||||||
|
|
||||||
for {
|
|
||||||
var err error
|
|
||||||
session, err = l.client.CreateMessageSession(ctx, l.scaleSetID, l.hostname)
|
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
clientErr := &actions.HttpClientSideError{}
|
|
||||||
if !errors.As(err, &clientErr) {
|
|
||||||
return fmt.Errorf("failed to create session: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if clientErr.Code != http.StatusConflict {
|
|
||||||
return fmt.Errorf("failed to create session: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
retries++
|
|
||||||
if retries >= sessionCreationMaxRetries {
|
|
||||||
return fmt.Errorf("failed to create session after %d retries: %w", retries, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Unable to create message session. Will try again in 30 seconds", "error", err.Error())
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return fmt.Errorf("context cancelled: %w", ctx.Err())
|
|
||||||
case <-time.After(30 * time.Second):
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
statistics, err := json.Marshal(session.Statistics)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal statistics: %w", err)
|
|
||||||
}
|
|
||||||
l.logger.Info("Current runner scale set statistics.", "statistics", string(statistics))
|
|
||||||
|
|
||||||
l.session = session
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
|
|
||||||
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
|
|
||||||
msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
|
|
||||||
if err == nil { // if NO error
|
|
||||||
return msg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
expiredError := &actions.MessageQueueTokenExpiredError{}
|
|
||||||
if !errors.As(err, &expiredError) {
|
|
||||||
return nil, fmt.Errorf("failed to get next message: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := l.refreshSession(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
|
|
||||||
|
|
||||||
msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
|
|
||||||
if err != nil { // if NO error
|
|
||||||
return nil, fmt.Errorf("failed to get next message after message session refresh: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return msg, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Listener) deleteLastMessage(ctx context.Context) error {
|
|
||||||
l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
|
|
||||||
if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil {
|
|
||||||
return fmt.Errorf("failed to delete message: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parsedMessage is the decoded form of one "RunnerScaleSetJobMessages"
// batch: the statistics snapshot plus the typed job events found in the body.
type parsedMessage struct {
	statistics    *actions.RunnerScaleSetStatistic
	jobsStarted   []*actions.JobStarted
	jobsAvailable []*actions.JobAvailable
	jobsCompleted []*actions.JobCompleted
}
|
|
||||||
|
|
||||||
func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
|
|
||||||
if msg.MessageType != "RunnerScaleSetJobMessages" {
|
|
||||||
l.logger.Info("Skipping message", "messageType", msg.MessageType)
|
|
||||||
return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Processing message", "messageId", msg.MessageId, "messageType", msg.MessageType)
|
|
||||||
if msg.Statistics == nil {
|
|
||||||
return nil, fmt.Errorf("invalid message: statistics is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("New runner scale set statistics.", "statistics", msg.Statistics)
|
|
||||||
|
|
||||||
var batchedMessages []json.RawMessage
|
|
||||||
if len(msg.Body) > 0 {
|
|
||||||
if err := json.Unmarshal([]byte(msg.Body), &batchedMessages); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
parsedMsg := &parsedMessage{
|
|
||||||
statistics: msg.Statistics,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, msg := range batchedMessages {
|
|
||||||
var messageType actions.JobMessageType
|
|
||||||
if err := json.Unmarshal(msg, &messageType); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode job message type: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch messageType.MessageType {
|
|
||||||
case messageTypeJobAvailable:
|
|
||||||
var jobAvailable actions.JobAvailable
|
|
||||||
if err := json.Unmarshal(msg, &jobAvailable); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode job available: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
|
|
||||||
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
|
|
||||||
|
|
||||||
case messageTypeJobAssigned:
|
|
||||||
var jobAssigned actions.JobAssigned
|
|
||||||
if err := json.Unmarshal(msg, &jobAssigned); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
|
|
||||||
|
|
||||||
case messageTypeJobStarted:
|
|
||||||
var jobStarted actions.JobStarted
|
|
||||||
if err := json.Unmarshal(msg, &jobStarted); err != nil {
|
|
||||||
return nil, fmt.Errorf("could not decode job started message. %w", err)
|
|
||||||
}
|
|
||||||
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
|
|
||||||
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
|
|
||||||
|
|
||||||
case messageTypeJobCompleted:
|
|
||||||
var jobCompleted actions.JobCompleted
|
|
||||||
if err := json.Unmarshal(msg, &jobCompleted); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode job completed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
|
|
||||||
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
|
|
||||||
|
|
||||||
default:
|
|
||||||
l.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return parsedMsg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
|
|
||||||
ids := make([]int64, 0, len(jobsAvailable))
|
|
||||||
for _, job := range jobsAvailable {
|
|
||||||
ids = append(ids, job.RunnerRequestId)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
|
|
||||||
|
|
||||||
ids, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
|
|
||||||
if err == nil { // if NO errors
|
|
||||||
return ids, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
expiredError := &actions.MessageQueueTokenExpiredError{}
|
|
||||||
if !errors.As(err, &expiredError) {
|
|
||||||
return nil, fmt.Errorf("failed to acquire jobs: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := l.refreshSession(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ids, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ids, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Listener) refreshSession(ctx context.Context) error {
|
|
||||||
l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
|
|
||||||
session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("refresh message session failed. %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.session = session
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Listener) deleteMessageSession() error {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
l.logger.Info("Deleting message session")
|
|
||||||
|
|
||||||
if err := l.client.DeleteMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId); err != nil {
|
|
||||||
return fmt.Errorf("failed to delete message session: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,865 +0,0 @@
|
|||||||
package listener
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"net/http"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/mock"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNew(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("InvalidConfig", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
var config Config
|
|
||||||
_, err := New(config)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("ValidConfig", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
config := Config{
|
|
||||||
Client: listenermocks.NewClient(t),
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
l, err := New(config)
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.NotNil(t, l)
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListener_createSession(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("FailOnce", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
err = l.createSession(ctx)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("FailContext", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil,
|
|
||||||
&actions.HttpClientSideError{Code: http.StatusConflict}).Once()
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
err = l.createSession(ctx)
|
|
||||||
assert.True(t, errors.Is(err, context.DeadlineExceeded))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("SetsSession", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: nil,
|
|
||||||
}
|
|
||||||
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
err = l.createSession(context.Background())
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, session, l.session)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListener_getMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
t.Run("ReceivesMessage", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
want := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 1,
|
|
||||||
}
|
|
||||||
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once()
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
l.session = &actions.RunnerScaleSetSession{}
|
|
||||||
|
|
||||||
got, err := l.getMessage(ctx)
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, want, got)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("NotExpiredError", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once()
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
l.session = &actions.RunnerScaleSetSession{}
|
|
||||||
|
|
||||||
_, err = l.getMessage(ctx)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RefreshAndSucceeds", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: nil,
|
|
||||||
}
|
|
||||||
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
|
|
||||||
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
|
|
||||||
|
|
||||||
want := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 1,
|
|
||||||
}
|
|
||||||
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := l.getMessage(ctx)
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, want, got)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RefreshAndFails", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: nil,
|
|
||||||
}
|
|
||||||
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
|
|
||||||
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := l.getMessage(ctx)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
assert.Nil(t, got)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListener_refreshSession(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
t.Run("SuccessfullyRefreshes", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
newUUID := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &newUUID,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: nil,
|
|
||||||
}
|
|
||||||
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
oldUUID := uuid.New()
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &oldUUID,
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
}
|
|
||||||
|
|
||||||
err = l.refreshSession(ctx)
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, session, l.session)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("FailsToRefresh", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, errors.New("error")).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
oldUUID := uuid.New()
|
|
||||||
oldSession := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &oldUUID,
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
}
|
|
||||||
l.session = oldSession
|
|
||||||
|
|
||||||
err = l.refreshSession(ctx)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
assert.Equal(t, oldSession, l.session)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListener_deleteLastMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
t.Run("SuccessfullyDeletes", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
|
|
||||||
return lastMessageID.(int64) == int64(5)
|
|
||||||
})).Return(nil).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
l.session = &actions.RunnerScaleSetSession{}
|
|
||||||
l.lastMessageID = 5
|
|
||||||
|
|
||||||
err = l.deleteLastMessage(ctx)
|
|
||||||
assert.Nil(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("FailsToDelete", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("error")).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
l.session = &actions.RunnerScaleSetSession{}
|
|
||||||
l.lastMessageID = 5
|
|
||||||
|
|
||||||
err = l.deleteLastMessage(ctx)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListener_Listen(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
t.Run("CreateSessionFails", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
err = l.Listen(ctx, nil)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CallHandleRegardlessOfInitialMessage", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
|
||||||
}
|
|
||||||
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
var called bool
|
|
||||||
handler := listenermocks.NewHandler(t)
|
|
||||||
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
|
|
||||||
Return(0, nil).
|
|
||||||
Run(
|
|
||||||
func(mock.Arguments) {
|
|
||||||
called = true
|
|
||||||
cancel()
|
|
||||||
},
|
|
||||||
).
|
|
||||||
Once()
|
|
||||||
|
|
||||||
err = l.Listen(ctx, handler)
|
|
||||||
assert.True(t, errors.Is(err, context.Canceled))
|
|
||||||
assert.True(t, called)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CancelContextAfterGetMessage", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
|
||||||
}
|
|
||||||
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
|
|
||||||
|
|
||||||
msg := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 1,
|
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
|
||||||
}
|
|
||||||
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).
|
|
||||||
Return(msg, nil).
|
|
||||||
Run(
|
|
||||||
func(mock.Arguments) {
|
|
||||||
cancel()
|
|
||||||
},
|
|
||||||
).
|
|
||||||
Once()
|
|
||||||
|
|
||||||
// Ensure delete message is called with background context
|
|
||||||
client.On("DeleteMessage", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
handler := listenermocks.NewHandler(t)
|
|
||||||
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
|
|
||||||
Return(0, nil).
|
|
||||||
Once()
|
|
||||||
|
|
||||||
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
|
|
||||||
Return(0, nil).
|
|
||||||
Once()
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
err = l.Listen(ctx, handler)
|
|
||||||
assert.ErrorIs(t, context.Canceled, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListener_acquireAvailableJobs(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
t.Run("FailingToAcquireJobs", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
|
||||||
}
|
|
||||||
|
|
||||||
availableJobs := []*actions.JobAvailable{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 2,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 3,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_, err = l.acquireAvailableJobs(ctx, availableJobs)
|
|
||||||
assert.Error(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("SuccessfullyAcquiresJobsOnFirstRun", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
jobIDs := []int64{1, 2, 3}
|
|
||||||
|
|
||||||
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(jobIDs, nil).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
|
||||||
}
|
|
||||||
|
|
||||||
availableJobs := []*actions.JobAvailable{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 2,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 3,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, availableJobs)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, []int64{1, 2, 3}, acquiredJobIDs)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RefreshAndSucceeds", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: nil,
|
|
||||||
}
|
|
||||||
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
|
|
||||||
// First call to AcquireJobs will fail with a token expired error
|
|
||||||
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
|
|
||||||
|
|
||||||
// Second call to AcquireJobs will succeed
|
|
||||||
want := []int64{1, 2, 3}
|
|
||||||
availableJobs := []*actions.JobAvailable{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 2,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 3,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := l.acquireAvailableJobs(ctx, availableJobs)
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.Equal(t, want, got)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RefreshAndFails", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
config := Config{
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics.Discard,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: nil,
|
|
||||||
}
|
|
||||||
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
|
|
||||||
client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
|
|
||||||
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
}
|
|
||||||
|
|
||||||
availableJobs := []*actions.JobAvailable{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 2,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
RunnerRequestId: 3,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := l.acquireAvailableJobs(ctx, availableJobs)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
assert.Nil(t, got)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListener_parseMessage(t *testing.T) {
|
|
||||||
t.Run("FailOnEmptyStatistics", func(t *testing.T) {
|
|
||||||
msg := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 1,
|
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
|
||||||
Statistics: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
l := &Listener{}
|
|
||||||
parsedMsg, err := l.parseMessage(context.Background(), msg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, parsedMsg)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("FailOnIncorrectMessageType", func(t *testing.T) {
|
|
||||||
msg := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 1,
|
|
||||||
MessageType: "RunnerMessages", // arbitrary message type
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
|
||||||
}
|
|
||||||
|
|
||||||
l := &Listener{}
|
|
||||||
parsedMsg, err := l.parseMessage(context.Background(), msg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, parsedMsg)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("ParseAll", func(t *testing.T) {
|
|
||||||
msg := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 1,
|
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
|
||||||
Body: "",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
|
||||||
TotalAvailableJobs: 1,
|
|
||||||
TotalAcquiredJobs: 2,
|
|
||||||
TotalAssignedJobs: 3,
|
|
||||||
TotalRunningJobs: 4,
|
|
||||||
TotalRegisteredRunners: 5,
|
|
||||||
TotalBusyRunners: 6,
|
|
||||||
TotalIdleRunners: 7,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var batchedMessages []any
|
|
||||||
jobsAvailable := []*actions.JobAvailable{
|
|
||||||
{
|
|
||||||
AcquireJobUrl: "https://github.com/example",
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobAvailable,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
AcquireJobUrl: "https://github.com/example",
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobAvailable,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 2,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, msg := range jobsAvailable {
|
|
||||||
batchedMessages = append(batchedMessages, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
jobsAssigned := []*actions.JobAssigned{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobAssigned,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 3,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobAssigned,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 4,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, msg := range jobsAssigned {
|
|
||||||
batchedMessages = append(batchedMessages, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
jobsStarted := []*actions.JobStarted{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobStarted,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 5,
|
|
||||||
},
|
|
||||||
RunnerId: 2,
|
|
||||||
RunnerName: "runner2",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, msg := range jobsStarted {
|
|
||||||
batchedMessages = append(batchedMessages, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
jobsCompleted := []*actions.JobCompleted{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobCompleted,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 6,
|
|
||||||
},
|
|
||||||
Result: "success",
|
|
||||||
RunnerId: 1,
|
|
||||||
RunnerName: "runner1",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, msg := range jobsCompleted {
|
|
||||||
batchedMessages = append(batchedMessages, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := json.Marshal(batchedMessages)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
msg.Body = string(b)
|
|
||||||
|
|
||||||
l := &Listener{}
|
|
||||||
parsedMsg, err := l.parseMessage(context.Background(), msg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, msg.Statistics, parsedMsg.statistics)
|
|
||||||
assert.Equal(t, jobsAvailable, parsedMsg.jobsAvailable)
|
|
||||||
assert.Equal(t, jobsStarted, parsedMsg.jobsStarted)
|
|
||||||
assert.Equal(t, jobsCompleted, parsedMsg.jobsCompleted)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,205 +0,0 @@
|
|||||||
package listener
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
|
|
||||||
metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/mock"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestInitialMetrics(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
t.Run("SetStaticMetrics", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
metrics := metricsmocks.NewPublisher(t)
|
|
||||||
|
|
||||||
minRunners := 5
|
|
||||||
maxRunners := 10
|
|
||||||
metrics.On("PublishStatic", minRunners, maxRunners).Once()
|
|
||||||
|
|
||||||
config := Config{
|
|
||||||
Client: listenermocks.NewClient(t),
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics,
|
|
||||||
MinRunners: minRunners,
|
|
||||||
MaxRunners: maxRunners,
|
|
||||||
}
|
|
||||||
l, err := New(config)
|
|
||||||
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.NotNil(t, l)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("InitialMessageStatistics", func(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
sessionStatistics := &actions.RunnerScaleSetStatistic{
|
|
||||||
TotalAvailableJobs: 1,
|
|
||||||
TotalAcquiredJobs: 2,
|
|
||||||
TotalAssignedJobs: 3,
|
|
||||||
TotalRunningJobs: 4,
|
|
||||||
TotalRegisteredRunners: 5,
|
|
||||||
TotalBusyRunners: 6,
|
|
||||||
TotalIdleRunners: 7,
|
|
||||||
}
|
|
||||||
|
|
||||||
uuid := uuid.New()
|
|
||||||
session := &actions.RunnerScaleSetSession{
|
|
||||||
SessionId: &uuid,
|
|
||||||
OwnerName: "example",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "https://example.com",
|
|
||||||
MessageQueueAccessToken: "1234567890",
|
|
||||||
Statistics: sessionStatistics,
|
|
||||||
}
|
|
||||||
|
|
||||||
metrics := metricsmocks.NewPublisher(t)
|
|
||||||
metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
|
|
||||||
metrics.On("PublishStatistics", sessionStatistics).Once()
|
|
||||||
metrics.On("PublishDesiredRunners", sessionStatistics.TotalAssignedJobs).
|
|
||||||
Run(
|
|
||||||
func(mock.Arguments) {
|
|
||||||
cancel()
|
|
||||||
},
|
|
||||||
).Once()
|
|
||||||
|
|
||||||
config := Config{
|
|
||||||
Client: listenermocks.NewClient(t),
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics,
|
|
||||||
}
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
|
|
||||||
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
|
|
||||||
config.Client = client
|
|
||||||
|
|
||||||
handler := listenermocks.NewHandler(t)
|
|
||||||
handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs).
|
|
||||||
Return(sessionStatistics.TotalAssignedJobs, nil).
|
|
||||||
Once()
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
assert.Nil(t, err)
|
|
||||||
assert.NotNil(t, l)
|
|
||||||
|
|
||||||
assert.ErrorIs(t, context.Canceled, l.Listen(ctx, handler))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHandleMessageMetrics(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
msg := &actions.RunnerScaleSetMessage{
|
|
||||||
MessageId: 1,
|
|
||||||
MessageType: "RunnerScaleSetJobMessages",
|
|
||||||
Body: "",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{
|
|
||||||
TotalAvailableJobs: 1,
|
|
||||||
TotalAcquiredJobs: 2,
|
|
||||||
TotalAssignedJobs: 3,
|
|
||||||
TotalRunningJobs: 4,
|
|
||||||
TotalRegisteredRunners: 5,
|
|
||||||
TotalBusyRunners: 6,
|
|
||||||
TotalIdleRunners: 7,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var batchedMessages []any
|
|
||||||
jobsStarted := []*actions.JobStarted{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobStarted,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 8,
|
|
||||||
},
|
|
||||||
RunnerId: 3,
|
|
||||||
RunnerName: "runner3",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, msg := range jobsStarted {
|
|
||||||
batchedMessages = append(batchedMessages, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
jobsCompleted := []*actions.JobCompleted{
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobCompleted,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 6,
|
|
||||||
},
|
|
||||||
Result: "success",
|
|
||||||
RunnerId: 1,
|
|
||||||
RunnerName: "runner1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
JobMessageBase: actions.JobMessageBase{
|
|
||||||
JobMessageType: actions.JobMessageType{
|
|
||||||
MessageType: messageTypeJobCompleted,
|
|
||||||
},
|
|
||||||
RunnerRequestId: 7,
|
|
||||||
},
|
|
||||||
Result: "success",
|
|
||||||
RunnerId: 2,
|
|
||||||
RunnerName: "runner2",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, msg := range jobsCompleted {
|
|
||||||
batchedMessages = append(batchedMessages, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := json.Marshal(batchedMessages)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
msg.Body = string(b)
|
|
||||||
|
|
||||||
desiredResult := 4
|
|
||||||
|
|
||||||
metrics := metricsmocks.NewPublisher(t)
|
|
||||||
metrics.On("PublishStatic", 0, 0).Once()
|
|
||||||
metrics.On("PublishStatistics", msg.Statistics).Once()
|
|
||||||
metrics.On("PublishJobCompleted", jobsCompleted[0]).Once()
|
|
||||||
metrics.On("PublishJobCompleted", jobsCompleted[1]).Once()
|
|
||||||
metrics.On("PublishJobStarted", jobsStarted[0]).Once()
|
|
||||||
metrics.On("PublishDesiredRunners", desiredResult).Once()
|
|
||||||
|
|
||||||
handler := listenermocks.NewHandler(t)
|
|
||||||
handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
|
|
||||||
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).Return(desiredResult, nil).Once()
|
|
||||||
|
|
||||||
client := listenermocks.NewClient(t)
|
|
||||||
client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
|
|
||||||
|
|
||||||
config := Config{
|
|
||||||
Client: listenermocks.NewClient(t),
|
|
||||||
ScaleSetID: 1,
|
|
||||||
Metrics: metrics,
|
|
||||||
}
|
|
||||||
|
|
||||||
l, err := New(config)
|
|
||||||
require.NoError(t, err)
|
|
||||||
l.client = client
|
|
||||||
l.session = &actions.RunnerScaleSetSession{
|
|
||||||
OwnerName: "",
|
|
||||||
RunnerScaleSet: &actions.RunnerScaleSet{},
|
|
||||||
MessageQueueUrl: "",
|
|
||||||
MessageQueueAccessToken: "",
|
|
||||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
|
||||||
}
|
|
||||||
|
|
||||||
err = l.handleMessage(context.Background(), handler, msg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
@@ -1,190 +0,0 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
|
||||||
|
|
||||||
package mocks
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
|
|
||||||
actions "github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
|
||||||
|
|
||||||
uuid "github.com/google/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Client is an autogenerated mock type for the Client type
|
|
||||||
type Client struct {
|
|
||||||
mock.Mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
|
|
||||||
func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
|
|
||||||
ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
|
|
||||||
|
|
||||||
var r0 []int64
|
|
||||||
var r1 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) ([]int64, error)); ok {
|
|
||||||
return rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
|
|
||||||
}
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok {
|
|
||||||
r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
|
|
||||||
} else {
|
|
||||||
if ret.Get(0) != nil {
|
|
||||||
r0 = ret.Get(0).([]int64)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok {
|
|
||||||
r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
|
|
||||||
} else {
|
|
||||||
r1 = ret.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
|
|
||||||
func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
|
|
||||||
ret := _m.Called(ctx, runnerScaleSetId, owner)
|
|
||||||
|
|
||||||
var r0 *actions.RunnerScaleSetSession
|
|
||||||
var r1 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int, string) (*actions.RunnerScaleSetSession, error)); ok {
|
|
||||||
return rf(ctx, runnerScaleSetId, owner)
|
|
||||||
}
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int, string) *actions.RunnerScaleSetSession); ok {
|
|
||||||
r0 = rf(ctx, runnerScaleSetId, owner)
|
|
||||||
} else {
|
|
||||||
if ret.Get(0) != nil {
|
|
||||||
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
|
|
||||||
r1 = rf(ctx, runnerScaleSetId, owner)
|
|
||||||
} else {
|
|
||||||
r1 = ret.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
|
|
||||||
func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
|
|
||||||
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
|
|
||||||
|
|
||||||
var r0 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok {
|
|
||||||
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Error(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
|
|
||||||
func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
|
|
||||||
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
|
|
||||||
|
|
||||||
var r0 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
|
|
||||||
r0 = rf(ctx, runnerScaleSetId, sessionId)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Error(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
|
|
||||||
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
|
|
||||||
ret := _m.Called(ctx, runnerScaleSetId)
|
|
||||||
|
|
||||||
var r0 *actions.AcquirableJobList
|
|
||||||
var r1 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int) (*actions.AcquirableJobList, error)); ok {
|
|
||||||
return rf(ctx, runnerScaleSetId)
|
|
||||||
}
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int) *actions.AcquirableJobList); ok {
|
|
||||||
r0 = rf(ctx, runnerScaleSetId)
|
|
||||||
} else {
|
|
||||||
if ret.Get(0) != nil {
|
|
||||||
r0 = ret.Get(0).(*actions.AcquirableJobList)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
|
|
||||||
r1 = rf(ctx, runnerScaleSetId)
|
|
||||||
} else {
|
|
||||||
r1 = ret.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId
|
|
||||||
func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) {
|
|
||||||
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
|
|
||||||
|
|
||||||
var r0 *actions.RunnerScaleSetMessage
|
|
||||||
var r1 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*actions.RunnerScaleSetMessage, error)); ok {
|
|
||||||
return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
|
|
||||||
}
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *actions.RunnerScaleSetMessage); ok {
|
|
||||||
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
|
|
||||||
} else {
|
|
||||||
if ret.Get(0) != nil {
|
|
||||||
r0 = ret.Get(0).(*actions.RunnerScaleSetMessage)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
|
|
||||||
r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId)
|
|
||||||
} else {
|
|
||||||
r1 = ret.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
|
|
||||||
func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
|
|
||||||
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
|
|
||||||
|
|
||||||
var r0 *actions.RunnerScaleSetSession
|
|
||||||
var r1 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) (*actions.RunnerScaleSetSession, error)); ok {
|
|
||||||
return rf(ctx, runnerScaleSetId, sessionId)
|
|
||||||
}
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *actions.RunnerScaleSetSession); ok {
|
|
||||||
r0 = rf(ctx, runnerScaleSetId, sessionId)
|
|
||||||
} else {
|
|
||||||
if ret.Get(0) != nil {
|
|
||||||
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok {
|
|
||||||
r1 = rf(ctx, runnerScaleSetId, sessionId)
|
|
||||||
} else {
|
|
||||||
r1 = ret.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
|
||||||
// The first argument is typically a *testing.T value.
|
|
||||||
func NewClient(t interface {
|
|
||||||
mock.TestingT
|
|
||||||
Cleanup(func())
|
|
||||||
}) *Client {
|
|
||||||
mock := &Client{}
|
|
||||||
mock.Mock.Test(t)
|
|
||||||
|
|
||||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
|
||||||
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
|
||||||
|
|
||||||
package mocks
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
|
|
||||||
actions "github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Handler is an autogenerated mock type for the Handler type
|
|
||||||
type Handler struct {
|
|
||||||
mock.Mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count
|
|
||||||
func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
|
|
||||||
ret := _m.Called(ctx, count)
|
|
||||||
|
|
||||||
var r0 int
|
|
||||||
var r1 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int) (int, error)); ok {
|
|
||||||
return rf(ctx, count)
|
|
||||||
}
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, int) int); ok {
|
|
||||||
r0 = rf(ctx, count)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Get(0).(int)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
|
|
||||||
r1 = rf(ctx, count)
|
|
||||||
} else {
|
|
||||||
r1 = ret.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0, r1
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
|
|
||||||
func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
|
|
||||||
ret := _m.Called(ctx, jobInfo)
|
|
||||||
|
|
||||||
var r0 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
|
|
||||||
r0 = rf(ctx, jobInfo)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Error(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
|
||||||
// The first argument is typically a *testing.T value.
|
|
||||||
func NewHandler(t interface {
|
|
||||||
mock.TestingT
|
|
||||||
Cleanup(func())
|
|
||||||
}) *Handler {
|
|
||||||
mock := &Handler{}
|
|
||||||
mock.Mock.Test(t)
|
|
||||||
|
|
||||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
|
||||||
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
|
|
||||||
if !ok {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
config, err := config.Read(configPath)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Failed to read config: %v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
app, err := app.New(config)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Failed to initialize app: %v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
|
||||||
defer stop()
|
|
||||||
|
|
||||||
if err := app.Run(ctx); err != nil {
|
|
||||||
log.Printf("Application returned an error: %v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,387 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
labelKeyRunnerScaleSetName = "name"
|
|
||||||
labelKeyRunnerScaleSetNamespace = "namespace"
|
|
||||||
labelKeyEnterprise = "enterprise"
|
|
||||||
labelKeyOrganization = "organization"
|
|
||||||
labelKeyRepository = "repository"
|
|
||||||
labelKeyJobName = "job_name"
|
|
||||||
labelKeyJobWorkflowRef = "job_workflow_ref"
|
|
||||||
labelKeyEventName = "event_name"
|
|
||||||
labelKeyJobResult = "job_result"
|
|
||||||
labelKeyRunnerID = "runner_id"
|
|
||||||
labelKeyRunnerName = "runner_name"
|
|
||||||
)
|
|
||||||
|
|
||||||
const githubScaleSetSubsystem = "gha"
|
|
||||||
|
|
||||||
// labels
|
|
||||||
var (
|
|
||||||
scaleSetLabels = []string{
|
|
||||||
labelKeyRunnerScaleSetName,
|
|
||||||
labelKeyRepository,
|
|
||||||
labelKeyOrganization,
|
|
||||||
labelKeyEnterprise,
|
|
||||||
labelKeyRunnerScaleSetNamespace,
|
|
||||||
}
|
|
||||||
|
|
||||||
jobLabels = []string{
|
|
||||||
labelKeyRepository,
|
|
||||||
labelKeyOrganization,
|
|
||||||
labelKeyEnterprise,
|
|
||||||
labelKeyJobName,
|
|
||||||
labelKeyJobWorkflowRef,
|
|
||||||
labelKeyEventName,
|
|
||||||
}
|
|
||||||
|
|
||||||
completedJobsTotalLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
|
|
||||||
jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
|
|
||||||
startedJobsTotalLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
|
|
||||||
jobStartupDurationLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
assignedJobs = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "assigned_jobs",
|
|
||||||
Help: "Number of jobs assigned to this scale set.",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
runningJobs = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "running_jobs",
|
|
||||||
Help: "Number of jobs running (or about to be run).",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
registeredRunners = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "registered_runners",
|
|
||||||
Help: "Number of runners registered by the scale set.",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
busyRunners = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "busy_runners",
|
|
||||||
Help: "Number of registered runners running a job.",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
minRunners = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "min_runners",
|
|
||||||
Help: "Minimum number of runners.",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
maxRunners = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "max_runners",
|
|
||||||
Help: "Maximum number of runners.",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
desiredRunners = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "desired_runners",
|
|
||||||
Help: "Number of runners desired by the scale set.",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
idleRunners = prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "idle_runners",
|
|
||||||
Help: "Number of registered runners not running a job.",
|
|
||||||
},
|
|
||||||
scaleSetLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
startedJobsTotal = prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "started_jobs_total",
|
|
||||||
Help: "Total number of jobs started.",
|
|
||||||
},
|
|
||||||
startedJobsTotalLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
completedJobsTotal = prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "completed_jobs_total",
|
|
||||||
Help: "Total number of jobs completed.",
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
},
|
|
||||||
completedJobsTotalLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
jobStartupDurationSeconds = prometheus.NewHistogramVec(
|
|
||||||
prometheus.HistogramOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "job_startup_duration_seconds",
|
|
||||||
Help: "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
|
|
||||||
Buckets: runtimeBuckets,
|
|
||||||
},
|
|
||||||
jobStartupDurationLabels,
|
|
||||||
)
|
|
||||||
|
|
||||||
jobExecutionDurationSeconds = prometheus.NewHistogramVec(
|
|
||||||
prometheus.HistogramOpts{
|
|
||||||
Subsystem: githubScaleSetSubsystem,
|
|
||||||
Name: "job_execution_duration_seconds",
|
|
||||||
Help: "Time spent executing workflow jobs by the scale set (in seconds).",
|
|
||||||
Buckets: runtimeBuckets,
|
|
||||||
},
|
|
||||||
jobExecutionDurationLabels,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
var runtimeBuckets []float64 = []float64{
|
|
||||||
0.01,
|
|
||||||
0.05,
|
|
||||||
0.1,
|
|
||||||
0.5,
|
|
||||||
1,
|
|
||||||
2,
|
|
||||||
3,
|
|
||||||
4,
|
|
||||||
5,
|
|
||||||
6,
|
|
||||||
7,
|
|
||||||
8,
|
|
||||||
9,
|
|
||||||
10,
|
|
||||||
12,
|
|
||||||
15,
|
|
||||||
18,
|
|
||||||
20,
|
|
||||||
25,
|
|
||||||
30,
|
|
||||||
40,
|
|
||||||
50,
|
|
||||||
60,
|
|
||||||
70,
|
|
||||||
80,
|
|
||||||
90,
|
|
||||||
100,
|
|
||||||
110,
|
|
||||||
120,
|
|
||||||
150,
|
|
||||||
180,
|
|
||||||
210,
|
|
||||||
240,
|
|
||||||
300,
|
|
||||||
360,
|
|
||||||
420,
|
|
||||||
480,
|
|
||||||
540,
|
|
||||||
600,
|
|
||||||
900,
|
|
||||||
1200,
|
|
||||||
1800,
|
|
||||||
2400,
|
|
||||||
3000,
|
|
||||||
3600,
|
|
||||||
}
|
|
||||||
|
|
||||||
type baseLabels struct {
|
|
||||||
scaleSetName string
|
|
||||||
scaleSetNamespace string
|
|
||||||
enterprise string
|
|
||||||
organization string
|
|
||||||
repository string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
|
|
||||||
return prometheus.Labels{
|
|
||||||
labelKeyEnterprise: b.enterprise,
|
|
||||||
labelKeyOrganization: b.organization,
|
|
||||||
labelKeyRepository: b.repository,
|
|
||||||
labelKeyJobName: jobBase.JobDisplayName,
|
|
||||||
labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
|
|
||||||
labelKeyEventName: jobBase.EventName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *baseLabels) scaleSetLabels() prometheus.Labels {
|
|
||||||
return prometheus.Labels{
|
|
||||||
labelKeyRunnerScaleSetName: b.scaleSetName,
|
|
||||||
labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
|
|
||||||
labelKeyEnterprise: b.enterprise,
|
|
||||||
labelKeyOrganization: b.organization,
|
|
||||||
labelKeyRepository: b.repository,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
|
|
||||||
l := b.jobLabels(&msg.JobMessageBase)
|
|
||||||
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
|
|
||||||
l[labelKeyJobResult] = msg.Result
|
|
||||||
l[labelKeyRunnerName] = msg.RunnerName
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
|
|
||||||
l := b.jobLabels(&msg.JobMessageBase)
|
|
||||||
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
|
|
||||||
l[labelKeyRunnerName] = msg.RunnerName
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:generate mockery --name Publisher --output ./mocks --outpkg mocks --case underscore
|
|
||||||
type Publisher interface {
|
|
||||||
PublishStatic(min, max int)
|
|
||||||
PublishStatistics(stats *actions.RunnerScaleSetStatistic)
|
|
||||||
PublishJobStarted(msg *actions.JobStarted)
|
|
||||||
PublishJobCompleted(msg *actions.JobCompleted)
|
|
||||||
PublishDesiredRunners(count int)
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:generate mockery --name ServerPublisher --output ./mocks --outpkg mocks --case underscore
|
|
||||||
type ServerPublisher interface {
|
|
||||||
Publisher
|
|
||||||
ListenAndServe(ctx context.Context) error
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ Publisher = &discard{}
|
|
||||||
var _ ServerPublisher = &exporter{}
|
|
||||||
|
|
||||||
var Discard Publisher = &discard{}
|
|
||||||
|
|
||||||
type exporter struct {
|
|
||||||
logger logr.Logger
|
|
||||||
baseLabels
|
|
||||||
srv *http.Server
|
|
||||||
}
|
|
||||||
|
|
||||||
type ExporterConfig struct {
|
|
||||||
ScaleSetName string
|
|
||||||
ScaleSetNamespace string
|
|
||||||
Enterprise string
|
|
||||||
Organization string
|
|
||||||
Repository string
|
|
||||||
ServerAddr string
|
|
||||||
ServerEndpoint string
|
|
||||||
Logger logr.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewExporter(config ExporterConfig) ServerPublisher {
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
reg.MustRegister(
|
|
||||||
assignedJobs,
|
|
||||||
runningJobs,
|
|
||||||
registeredRunners,
|
|
||||||
busyRunners,
|
|
||||||
minRunners,
|
|
||||||
maxRunners,
|
|
||||||
desiredRunners,
|
|
||||||
idleRunners,
|
|
||||||
startedJobsTotal,
|
|
||||||
completedJobsTotal,
|
|
||||||
jobStartupDurationSeconds,
|
|
||||||
jobExecutionDurationSeconds,
|
|
||||||
)
|
|
||||||
|
|
||||||
mux := http.NewServeMux()
|
|
||||||
mux.Handle(
|
|
||||||
config.ServerEndpoint,
|
|
||||||
promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
|
|
||||||
)
|
|
||||||
|
|
||||||
return &exporter{
|
|
||||||
logger: config.Logger.WithName("metrics"),
|
|
||||||
baseLabels: baseLabels{
|
|
||||||
scaleSetName: config.ScaleSetName,
|
|
||||||
scaleSetNamespace: config.ScaleSetNamespace,
|
|
||||||
enterprise: config.Enterprise,
|
|
||||||
organization: config.Organization,
|
|
||||||
repository: config.Repository,
|
|
||||||
},
|
|
||||||
srv: &http.Server{
|
|
||||||
Addr: config.ServerAddr,
|
|
||||||
Handler: mux,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *exporter) ListenAndServe(ctx context.Context) error {
|
|
||||||
e.logger.Info("starting metrics server", "addr", e.srv.Addr)
|
|
||||||
go func() {
|
|
||||||
<-ctx.Done()
|
|
||||||
e.logger.Info("stopping metrics server")
|
|
||||||
e.srv.Shutdown(ctx)
|
|
||||||
}()
|
|
||||||
return e.srv.ListenAndServe()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *exporter) PublishStatic(min, max int) {
|
|
||||||
l := m.scaleSetLabels()
|
|
||||||
maxRunners.With(l).Set(float64(max))
|
|
||||||
minRunners.With(l).Set(float64(min))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
|
|
||||||
l := e.scaleSetLabels()
|
|
||||||
|
|
||||||
assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
|
|
||||||
runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
|
|
||||||
registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
|
|
||||||
busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
|
|
||||||
idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
|
|
||||||
l := e.startedJobLabels(msg)
|
|
||||||
startedJobsTotal.With(l).Inc()
|
|
||||||
|
|
||||||
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
|
|
||||||
jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
|
|
||||||
l := e.completedJobLabels(msg)
|
|
||||||
completedJobsTotal.With(l).Inc()
|
|
||||||
|
|
||||||
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
|
|
||||||
jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *exporter) PublishDesiredRunners(count int) {
|
|
||||||
desiredRunners.With(m.scaleSetLabels()).Set(float64(count))
|
|
||||||
}
|
|
||||||
|
|
||||||
type discard struct{}
|
|
||||||
|
|
||||||
func (*discard) PublishStatic(int, int) {}
|
|
||||||
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic) {}
|
|
||||||
func (*discard) PublishJobStarted(*actions.JobStarted) {}
|
|
||||||
func (*discard) PublishJobCompleted(*actions.JobCompleted) {}
|
|
||||||
func (*discard) PublishDesiredRunners(int) {}
|
|
||||||
@@ -1,53 +0,0 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
|
||||||
|
|
||||||
package mocks
|
|
||||||
|
|
||||||
import (
|
|
||||||
actions "github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Publisher is an autogenerated mock type for the Publisher type
|
|
||||||
type Publisher struct {
|
|
||||||
mock.Mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishDesiredRunners provides a mock function with given fields: count
|
|
||||||
func (_m *Publisher) PublishDesiredRunners(count int) {
|
|
||||||
_m.Called(count)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishJobCompleted provides a mock function with given fields: msg
|
|
||||||
func (_m *Publisher) PublishJobCompleted(msg *actions.JobCompleted) {
|
|
||||||
_m.Called(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishJobStarted provides a mock function with given fields: msg
|
|
||||||
func (_m *Publisher) PublishJobStarted(msg *actions.JobStarted) {
|
|
||||||
_m.Called(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishStatic provides a mock function with given fields: min, max
|
|
||||||
func (_m *Publisher) PublishStatic(min int, max int) {
|
|
||||||
_m.Called(min, max)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishStatistics provides a mock function with given fields: stats
|
|
||||||
func (_m *Publisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
|
|
||||||
_m.Called(stats)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPublisher creates a new instance of Publisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
|
||||||
// The first argument is typically a *testing.T value.
|
|
||||||
func NewPublisher(t interface {
|
|
||||||
mock.TestingT
|
|
||||||
Cleanup(func())
|
|
||||||
}) *Publisher {
|
|
||||||
mock := &Publisher{}
|
|
||||||
mock.Mock.Test(t)
|
|
||||||
|
|
||||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
|
||||||
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
|
||||||
|
|
||||||
package mocks
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
|
|
||||||
actions "github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServerPublisher is an autogenerated mock type for the ServerPublisher type
|
|
||||||
type ServerPublisher struct {
|
|
||||||
mock.Mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListenAndServe provides a mock function with given fields: ctx
|
|
||||||
func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
|
|
||||||
ret := _m.Called(ctx)
|
|
||||||
|
|
||||||
var r0 error
|
|
||||||
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
|
|
||||||
r0 = rf(ctx)
|
|
||||||
} else {
|
|
||||||
r0 = ret.Error(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r0
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishDesiredRunners provides a mock function with given fields: count
|
|
||||||
func (_m *ServerPublisher) PublishDesiredRunners(count int) {
|
|
||||||
_m.Called(count)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishJobCompleted provides a mock function with given fields: msg
|
|
||||||
func (_m *ServerPublisher) PublishJobCompleted(msg *actions.JobCompleted) {
|
|
||||||
_m.Called(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishJobStarted provides a mock function with given fields: msg
|
|
||||||
func (_m *ServerPublisher) PublishJobStarted(msg *actions.JobStarted) {
|
|
||||||
_m.Called(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishStatic provides a mock function with given fields: min, max
|
|
||||||
func (_m *ServerPublisher) PublishStatic(min int, max int) {
|
|
||||||
_m.Called(min, max)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublishStatistics provides a mock function with given fields: stats
|
|
||||||
func (_m *ServerPublisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
|
|
||||||
_m.Called(stats)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServerPublisher creates a new instance of ServerPublisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
|
||||||
// The first argument is typically a *testing.T value.
|
|
||||||
func NewServerPublisher(t interface {
|
|
||||||
mock.TestingT
|
|
||||||
Cleanup(func())
|
|
||||||
}) *ServerPublisher {
|
|
||||||
mock := &ServerPublisher{}
|
|
||||||
mock.Mock.Test(t)
|
|
||||||
|
|
||||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
|
||||||
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
@@ -1,234 +0,0 @@
|
|||||||
package worker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/actions/actions-runner-controller/logging"
|
|
||||||
jsonpatch "github.com/evanphx/json-patch"
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
|
||||||
"k8s.io/client-go/kubernetes"
|
|
||||||
"k8s.io/client-go/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const workerName = "kubernetesworker"
|
|
||||||
|
|
||||||
type Option func(*Worker)
|
|
||||||
|
|
||||||
func WithLogger(logger logr.Logger) Option {
|
|
||||||
return func(w *Worker) {
|
|
||||||
logger = logger.WithName(workerName)
|
|
||||||
w.logger = &logger
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
EphemeralRunnerSetNamespace string
|
|
||||||
EphemeralRunnerSetName string
|
|
||||||
MaxRunners int
|
|
||||||
MinRunners int
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Worker's role is to process the messages it receives from the listener.
|
|
||||||
// It then initiates Kubernetes API requests to carry out the necessary actions.
|
|
||||||
type Worker struct {
|
|
||||||
clientset *kubernetes.Clientset
|
|
||||||
config Config
|
|
||||||
lastPatch int
|
|
||||||
logger *logr.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ listener.Handler = (*Worker)(nil)
|
|
||||||
|
|
||||||
func New(config Config, options ...Option) (*Worker, error) {
|
|
||||||
w := &Worker{
|
|
||||||
config: config,
|
|
||||||
lastPatch: -1,
|
|
||||||
}
|
|
||||||
|
|
||||||
conf, err := rest.InClusterConfig()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
clientset, err := kubernetes.NewForConfig(conf)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
w.clientset = clientset
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
option(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.applyDefaults(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return w, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Worker) applyDefaults() error {
|
|
||||||
if w.logger == nil {
|
|
||||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatJSON)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("NewLogger failed: %w", err)
|
|
||||||
}
|
|
||||||
logger = logger.WithName(workerName)
|
|
||||||
w.logger = &logger
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleJobStarted updates the job information for the ephemeral runner when a job is started.
|
|
||||||
// It takes a context and a jobInfo parameter which contains the details of the started job.
|
|
||||||
// This update marks the ephemeral runner so that the controller would have more context
|
|
||||||
// about the ephemeral runner that should not be deleted when scaling down.
|
|
||||||
// It returns an error if there is any issue with updating the job information.
|
|
||||||
func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
|
|
||||||
w.logger.Info("Updating job info for the runner",
|
|
||||||
"runnerName", jobInfo.RunnerName,
|
|
||||||
"ownerName", jobInfo.OwnerName,
|
|
||||||
"repoName", jobInfo.RepositoryName,
|
|
||||||
"workflowRef", jobInfo.JobWorkflowRef,
|
|
||||||
"workflowRunId", jobInfo.WorkflowRunId,
|
|
||||||
"jobDisplayName", jobInfo.JobDisplayName,
|
|
||||||
"requestId", jobInfo.RunnerRequestId)
|
|
||||||
|
|
||||||
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal empty ephemeral runner: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
patch, err := json.Marshal(
|
|
||||||
&v1alpha1.EphemeralRunner{
|
|
||||||
Status: v1alpha1.EphemeralRunnerStatus{
|
|
||||||
JobRequestId: jobInfo.RunnerRequestId,
|
|
||||||
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
|
|
||||||
WorkflowRunId: jobInfo.WorkflowRunId,
|
|
||||||
JobWorkflowRef: jobInfo.JobWorkflowRef,
|
|
||||||
JobDisplayName: jobInfo.JobDisplayName,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal ephemeral runner patch: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create merge patch json for ephemeral runner: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.logger.Info("Updating ephemeral runner with merge patch", "json", string(mergePatch))
|
|
||||||
|
|
||||||
patchedStatus := &v1alpha1.EphemeralRunner{}
|
|
||||||
err = w.clientset.RESTClient().
|
|
||||||
Patch(types.MergePatchType).
|
|
||||||
Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
|
|
||||||
Namespace(w.config.EphemeralRunnerSetNamespace).
|
|
||||||
Resource("EphemeralRunners").
|
|
||||||
Name(jobInfo.RunnerName).
|
|
||||||
SubResource("status").
|
|
||||||
Body(mergePatch).
|
|
||||||
Do(ctx).
|
|
||||||
Into(patchedStatus)
|
|
||||||
if err != nil {
|
|
||||||
if kerrors.IsNotFound(err) {
|
|
||||||
w.logger.Info("Ephemeral runner not found, skipping patching of ephemeral runner status", "runnerName", jobInfo.RunnerName)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.logger.Info("Ephemeral runner status updated with the merge patch successfully.")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleDesiredRunnerCount handles the desired runner count by scaling the ephemeral runner set.
|
|
||||||
// The function calculates the target runner count based on the minimum and maximum runner count configuration.
|
|
||||||
// If the target runner count is the same as the last patched count, it skips patching and returns nil.
|
|
||||||
// Otherwise, it creates a merge patch JSON for updating the ephemeral runner set with the desired count.
|
|
||||||
// The function then scales the ephemeral runner set by applying the merge patch.
|
|
||||||
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
|
|
||||||
// If any error occurs during the process, it returns an error with a descriptive message.
|
|
||||||
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
|
|
||||||
// Max runners should always be set by the resource builder either to the configured value,
|
|
||||||
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
|
|
||||||
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
|
|
||||||
|
|
||||||
logValues := []any{
|
|
||||||
"assigned job", count,
|
|
||||||
"decision", targetRunnerCount,
|
|
||||||
"min", w.config.MinRunners,
|
|
||||||
"max", w.config.MaxRunners,
|
|
||||||
"currentRunnerCount", w.lastPatch,
|
|
||||||
}
|
|
||||||
|
|
||||||
if targetRunnerCount == w.lastPatch {
|
|
||||||
w.logger.Info("Skipping patching of EphemeralRunnerSet as the desired count has not changed", logValues...)
|
|
||||||
return targetRunnerCount, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
original, err := json.Marshal(
|
|
||||||
&v1alpha1.EphemeralRunnerSet{
|
|
||||||
Spec: v1alpha1.EphemeralRunnerSetSpec{
|
|
||||||
Replicas: -1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("failed to marshal empty ephemeral runner set: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
patch, err := json.Marshal(
|
|
||||||
&v1alpha1.EphemeralRunnerSet{
|
|
||||||
Spec: v1alpha1.EphemeralRunnerSetSpec{
|
|
||||||
Replicas: targetRunnerCount,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
w.logger.Error(err, "could not marshal patch ephemeral runner set")
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))
|
|
||||||
|
|
||||||
w.logger.Info("Scaling ephemeral runner set", logValues...)
|
|
||||||
|
|
||||||
patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
|
|
||||||
err = w.clientset.RESTClient().
|
|
||||||
Patch(types.MergePatchType).
|
|
||||||
Prefix("apis", v1alpha1.GroupVersion.Group, v1alpha1.GroupVersion.Version).
|
|
||||||
Namespace(w.config.EphemeralRunnerSetNamespace).
|
|
||||||
Resource("ephemeralrunnersets").
|
|
||||||
Name(w.config.EphemeralRunnerSetName).
|
|
||||||
Body([]byte(mergePatch)).
|
|
||||||
Do(ctx).
|
|
||||||
Into(patchedEphemeralRunnerSet)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.logger.Info("Ephemeral runner set scaled.",
|
|
||||||
"namespace", w.config.EphemeralRunnerSetNamespace,
|
|
||||||
"name", w.config.EphemeralRunnerSetName,
|
|
||||||
"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
|
|
||||||
)
|
|
||||||
return targetRunnerCount, nil
|
|
||||||
}
|
|
||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
|
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
|
||||||
@@ -205,9 +206,7 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) scaleForAssignedJobCount(count int) error {
|
func (s *Service) scaleForAssignedJobCount(count int) error {
|
||||||
// Max runners should always be set by the resource builder either to the configured value,
|
targetRunnerCount := int(math.Max(math.Min(float64(s.settings.MaxRunners), float64(count)), float64(s.settings.MinRunners)))
|
||||||
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
|
|
||||||
targetRunnerCount := min(s.settings.MinRunners+count, s.settings.MaxRunners)
|
|
||||||
s.metricsExporter.publishDesiredRunners(targetRunnerCount)
|
s.metricsExporter.publishDesiredRunners(targetRunnerCount)
|
||||||
if targetRunnerCount != s.currentRunnerCount {
|
if targetRunnerCount != s.currentRunnerCount {
|
||||||
s.logger.Info("try scale runner request up/down base on assigned job count",
|
s.logger.Info("try scale runner request up/down base on assigned job count",
|
||||||
|
|||||||
@@ -397,7 +397,7 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
|
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
||||||
|
|
||||||
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
@@ -523,9 +523,9 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 4).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
|
||||||
|
|
||||||
err = service.scaleForAssignedJobCount(0)
|
err = service.scaleForAssignedJobCount(0)
|
||||||
@@ -569,7 +569,7 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
|
|||||||
)
|
)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(fmt.Errorf("error"))
|
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error"))
|
||||||
|
|
||||||
err = service.scaleForAssignedJobCount(2)
|
err = service.scaleForAssignedJobCount(2)
|
||||||
|
|
||||||
@@ -605,23 +605,8 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {
|
|||||||
|
|
||||||
service.currentRunnerCount = 1
|
service.currentRunnerCount = 1
|
||||||
|
|
||||||
mockKubeManager.On(
|
mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
|
||||||
"UpdateEphemeralRunnerWithJobInfo",
|
|
||||||
ctx,
|
|
||||||
service.settings.Namespace,
|
|
||||||
"runner1",
|
|
||||||
"owner1",
|
|
||||||
"repo1",
|
|
||||||
".github/workflows/ci.yaml",
|
|
||||||
"job1",
|
|
||||||
int64(100),
|
|
||||||
int64(3),
|
|
||||||
).Run(
|
|
||||||
func(_ mock.Arguments) { cancel() },
|
|
||||||
).Return(nil).Once()
|
|
||||||
|
|
||||||
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
|
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
|
||||||
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil)
|
|
||||||
|
|
||||||
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
err = service.processMessage(&actions.RunnerScaleSetMessage{
|
||||||
MessageId: 1,
|
MessageId: 1,
|
||||||
|
|||||||
@@ -176,8 +176,6 @@ func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOpti
|
|||||||
Version: build.Version,
|
Version: build.Version,
|
||||||
CommitSHA: build.CommitSHA,
|
CommitSHA: build.CommitSHA,
|
||||||
ScaleSetID: rc.RunnerScaleSetId,
|
ScaleSetID: rc.RunnerScaleSetId,
|
||||||
HasProxy: hasProxy(),
|
|
||||||
Subsystem: "githubrunnerscalesetlistener",
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to create an Actions Service client: %w", err)
|
return fmt.Errorf("failed to create an Actions Service client: %w", err)
|
||||||
@@ -237,8 +235,3 @@ func newActionsClientFromConfig(config config.Config, creds *actions.ActionsAuth
|
|||||||
|
|
||||||
return actions.NewClient(config.ConfigureUrl, creds, options...)
|
return actions.NewClient(config.ConfigureUrl, creds, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasProxy() bool {
|
|
||||||
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
|
|
||||||
return proxyFunc != nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
// Code generated by mockery v2.33.2. DO NOT EDIT.
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
// Code generated by mockery v2.33.2. DO NOT EDIT.
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
autoscalingListenerContainerName = "listener"
|
autoscalingListenerContainerName = "autoscaler"
|
||||||
autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
|
autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -425,7 +425,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
Name: autoscalingListenerContainerName,
|
Name: "listener",
|
||||||
ImagePullPolicy: corev1.PullAlways,
|
ImagePullPolicy: corev1.PullAlways,
|
||||||
SecurityContext: &corev1.SecurityContext{
|
SecurityContext: &corev1.SecurityContext{
|
||||||
RunAsUser: &runAsUser,
|
RunAsUser: &runAsUser,
|
||||||
@@ -555,7 +555,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
|
|
||||||
Expect(pod.Spec.SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
|
Expect(pod.Spec.SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
|
||||||
|
|
||||||
Expect(pod.Spec.Containers[0].Name).To(Equal(autoscalingListenerContainerName), "Pod should have the correct container name")
|
Expect(pod.Spec.Containers[0].Name).NotTo(Equal("listener"), "Pod should have the correct container name")
|
||||||
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
|
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
|
||||||
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
|
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
|
||||||
|
|
||||||
@@ -854,7 +854,7 @@ var _ = Describe("Test AutoScalingListener controller with template modification
|
|||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
{
|
{
|
||||||
Name: autoscalingListenerContainerName,
|
Name: "listener",
|
||||||
ImagePullPolicy: corev1.PullAlways,
|
ImagePullPolicy: corev1.PullAlways,
|
||||||
SecurityContext: &corev1.SecurityContext{
|
SecurityContext: &corev1.SecurityContext{
|
||||||
RunAsUser: &runAsUser1001,
|
RunAsUser: &runAsUser1001,
|
||||||
|
|||||||
@@ -466,8 +466,6 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
|
|||||||
Version: build.Version,
|
Version: build.Version,
|
||||||
CommitSHA: build.CommitSHA,
|
CommitSHA: build.CommitSHA,
|
||||||
ScaleSetID: runnerScaleSet.Id,
|
ScaleSetID: runnerScaleSet.Id,
|
||||||
HasProxy: autoscalingRunnerSet.Spec.Proxy != nil,
|
|
||||||
Subsystem: "controller",
|
|
||||||
})
|
})
|
||||||
|
|
||||||
logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id, "runnerGroupName", runnerScaleSet.RunnerGroupName)
|
logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id, "runnerGroupName", runnerScaleSet.RunnerGroupName)
|
||||||
|
|||||||
@@ -66,9 +66,3 @@ const DefaultScaleSetListenerLogFormat = string(logging.LogFormatText)
|
|||||||
|
|
||||||
// ownerKey is field selector matching the owner name of a particular resource
|
// ownerKey is field selector matching the owner name of a particular resource
|
||||||
const resourceOwnerKey = ".metadata.controller"
|
const resourceOwnerKey = ".metadata.controller"
|
||||||
|
|
||||||
// EphemeralRunner pod creation failure reasons
|
|
||||||
const (
|
|
||||||
ReasonTooManyPodFailures = "TooManyPodFailures"
|
|
||||||
ReasonInvalidPodFailure = "InvalidPod"
|
|
||||||
)
|
|
||||||
|
|||||||
@@ -192,7 +192,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
|||||||
case len(ephemeralRunner.Status.Failures) > 5:
|
case len(ephemeralRunner.Status.Failures) > 5:
|
||||||
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
|
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
|
||||||
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
|
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
|
||||||
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonTooManyPodFailures, log); err != nil {
|
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, log); err != nil {
|
||||||
log.Error(err, "Failed to set ephemeral runner to phase Failed")
|
log.Error(err, "Failed to set ephemeral runner to phase Failed")
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
@@ -201,22 +201,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
|||||||
default:
|
default:
|
||||||
// Pod was not found. Create if the pod has never been created
|
// Pod was not found. Create if the pod has never been created
|
||||||
log.Info("Creating new EphemeralRunner pod.")
|
log.Info("Creating new EphemeralRunner pod.")
|
||||||
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
|
return r.createPod(ctx, ephemeralRunner, secret, log)
|
||||||
switch {
|
|
||||||
case err == nil:
|
|
||||||
return result, nil
|
|
||||||
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
|
|
||||||
log.Error(err, "Failed to create a pod due to unrecoverable failure")
|
|
||||||
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
|
|
||||||
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
|
|
||||||
log.Error(err, "Failed to set ephemeral runner to phase Failed")
|
|
||||||
return ctrl.Result{}, err
|
|
||||||
}
|
|
||||||
return ctrl.Result{}, nil
|
|
||||||
default:
|
|
||||||
log.Error(err, "Failed to create the pod")
|
|
||||||
return ctrl.Result{}, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -439,11 +424,11 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
|
|||||||
return false, multierr.Combine(errs...)
|
return false, multierr.Combine(errs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
|
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, log logr.Logger) error {
|
||||||
log.Info("Updating ephemeral runner status to Failed")
|
log.Info("Updating ephemeral runner status to Failed")
|
||||||
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||||
obj.Status.Phase = corev1.PodFailed
|
obj.Status.Phase = corev1.PodFailed
|
||||||
obj.Status.Reason = reason
|
obj.Status.Reason = "TooManyPodFailures"
|
||||||
obj.Status.Message = errMessage
|
obj.Status.Message = errMessage
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err)
|
return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err)
|
||||||
|
|||||||
@@ -189,25 +189,6 @@ var _ = Describe("EphemeralRunner", func() {
|
|||||||
).Should(BeEquivalentTo(true))
|
).Should(BeEquivalentTo(true))
|
||||||
})
|
})
|
||||||
|
|
||||||
It("It should failed if a pod template is invalid", func() {
|
|
||||||
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
|
|
||||||
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
|
|
||||||
|
|
||||||
err := k8sClient.Create(ctx, invalideEphemeralRunner)
|
|
||||||
Expect(err).To(BeNil())
|
|
||||||
|
|
||||||
updated := new(v1alpha1.EphemeralRunner)
|
|
||||||
Eventually(func() (corev1.PodPhase, error) {
|
|
||||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace}, updated)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
return updated.Status.Phase, nil
|
|
||||||
}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
|
|
||||||
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
|
|
||||||
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
|
|
||||||
})
|
|
||||||
|
|
||||||
It("It should clean up resources when deleted", func() {
|
It("It should clean up resources when deleted", func() {
|
||||||
// wait for pod to be created
|
// wait for pod to be created
|
||||||
pod := new(corev1.Pod)
|
pod := new(corev1.Pod)
|
||||||
|
|||||||
@@ -38,11 +38,8 @@ var commonLabelKeys = [...]string{
|
|||||||
|
|
||||||
const labelValueKubernetesPartOf = "gha-runner-scale-set"
|
const labelValueKubernetesPartOf = "gha-runner-scale-set"
|
||||||
|
|
||||||
var (
|
var scaleSetListenerLogLevel = DefaultScaleSetListenerLogLevel
|
||||||
scaleSetListenerLogLevel = DefaultScaleSetListenerLogLevel
|
var scaleSetListenerLogFormat = DefaultScaleSetListenerLogFormat
|
||||||
scaleSetListenerLogFormat = DefaultScaleSetListenerLogFormat
|
|
||||||
scaleSetListenerEntrypoint = "/ghalistener"
|
|
||||||
)
|
|
||||||
|
|
||||||
func SetListenerLoggingParameters(level string, format string) bool {
|
func SetListenerLoggingParameters(level string, format string) bool {
|
||||||
switch level {
|
switch level {
|
||||||
@@ -62,12 +59,6 @@ func SetListenerLoggingParameters(level string, format string) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func SetListenerEntrypoint(entrypoint string) {
|
|
||||||
if entrypoint != "" {
|
|
||||||
scaleSetListenerEntrypoint = entrypoint
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type resourceBuilder struct{}
|
type resourceBuilder struct{}
|
||||||
|
|
||||||
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
|
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
|
||||||
@@ -226,7 +217,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
|
|||||||
ports = append(ports, port)
|
ports = append(ports, port)
|
||||||
}
|
}
|
||||||
|
|
||||||
terminationGracePeriodSeconds := int64(60)
|
|
||||||
podSpec := corev1.PodSpec{
|
podSpec := corev1.PodSpec{
|
||||||
ServiceAccountName: serviceAccount.Name,
|
ServiceAccountName: serviceAccount.Name,
|
||||||
Containers: []corev1.Container{
|
Containers: []corev1.Container{
|
||||||
@@ -235,7 +225,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
|
|||||||
Image: autoscalingListener.Spec.Image,
|
Image: autoscalingListener.Spec.Image,
|
||||||
Env: listenerEnv,
|
Env: listenerEnv,
|
||||||
Command: []string{
|
Command: []string{
|
||||||
scaleSetListenerEntrypoint,
|
"/github-runnerscaleset-listener",
|
||||||
},
|
},
|
||||||
Ports: ports,
|
Ports: ports,
|
||||||
VolumeMounts: []corev1.VolumeMount{
|
VolumeMounts: []corev1.VolumeMount{
|
||||||
@@ -257,9 +247,8 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets,
|
ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets,
|
||||||
RestartPolicy: corev1.RestartPolicyNever,
|
RestartPolicy: corev1.RestartPolicyNever,
|
||||||
TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
labels := make(map[string]string, len(autoscalingListener.Labels))
|
labels := make(map[string]string, len(autoscalingListener.Labels))
|
||||||
@@ -311,7 +300,7 @@ func mergeListenerPodWithTemplate(pod *corev1.Pod, tmpl *corev1.PodTemplateSpec)
|
|||||||
c := &tmpl.Spec.Containers[i]
|
c := &tmpl.Spec.Containers[i]
|
||||||
|
|
||||||
switch c.Name {
|
switch c.Name {
|
||||||
case autoscalingListenerContainerName:
|
case "listener":
|
||||||
mergeListenerContainer(listenerContainer, c)
|
mergeListenerContainer(listenerContainer, c)
|
||||||
default:
|
default:
|
||||||
pod.Spec.Containers = append(pod.Spec.Containers, *c)
|
pod.Spec.Containers = append(pod.Spec.Containers, *c)
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# About ARC
|
# About ARC
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
This document provides a high-level overview of Actions Runner Controller (ARC). ARC enables running Github Actions Runners on Kubernetes (K8s) clusters.
|
This document provides a high-level overview of Actions Runner Controller (ARC). ARC enables running Github Actions Runners on Kubernetes (K8s) clusters.
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# Changing semantics of the `minRunners` field
|
# Changing semantics of the `minRunners` field
|
||||||
|
|
||||||
**Status**: Accepted
|
**Status**: Proposed
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
@@ -1,8 +1,5 @@
|
|||||||
# Authenticating to the GitHub API
|
# Authenticating to the GitHub API
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Setting Up Authentication with GitHub API
|
## Setting Up Authentication with GitHub API
|
||||||
|
|
||||||
There are two ways for actions-runner-controller to authenticate with the GitHub API (only 1 can be configured at a time however):
|
There are two ways for actions-runner-controller to authenticate with the GitHub API (only 1 can be configured at a time however):
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Automatically scaling runners
|
# Automatically scaling runners
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
> If you are using controller version < [v0.22.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.22.0) and you are not using GHES, and so you can't set your rate limit budget, it is recommended that you use 100 replicas or fewer to prevent being rate limited.
|
> If you are using controller version < [v0.22.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.22.0) and you are not using GHES, and so you can't set your rate limit budget, it is recommended that you use 100 replicas or fewer to prevent being rate limited.
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Adding ARC runners to a repository, organization, or enterprise
|
# Adding ARC runners to a repository, organization, or enterprise
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners#about-self-hosted-runners):
|
[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners#about-self-hosted-runners):
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Configuring Windows runners
|
# Configuring Windows runners
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Setting up Windows Runners
|
## Setting up Windows Runners
|
||||||
|
|
||||||
The main two steps in enabling Windows self-hosted runners are:
|
The main two steps in enabling Windows self-hosted runners are:
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Deploying alternative runners
|
# Deploying alternative runners
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Alternative Runners
|
## Alternative Runners
|
||||||
|
|
||||||
ARC also offers a few alternative runner options
|
ARC also offers a few alternative runner options
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Deploying ARC runners
|
# Deploying ARC runners
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Deploying runners with RunnerDeployments
|
## Deploying runners with RunnerDeployments
|
||||||
|
|
||||||
In our previous examples we were deploying a single runner via the `RunnerDeployment` kind, the amount of runners deployed can be statically set via the `replicas:` field, we can increase this value to deploy additional sets of runners instead:
|
In our previous examples we were deploying a single runner via the `RunnerDeployment` kind, the amount of runners deployed can be statically set via the `replicas:` field, we can increase this value to deploy additional sets of runners instead:
|
||||||
|
|||||||
@@ -43,35 +43,6 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
|
|||||||
|
|
||||||
## Changelog
|
## Changelog
|
||||||
|
|
||||||
### v0.8.2
|
|
||||||
1. Add listener graceful termination period and background context after the message is received [#3187](https://github.com/actions/actions-runner-controller/pull/3187)
|
|
||||||
1. Publish metrics in the new ghalistener [#3193](https://github.com/actions/actions-runner-controller/pull/3193)
|
|
||||||
1. Delete message session when listener.Listen returns [#3240](https://github.com/actions/actions-runner-controller/pull/3240)
|
|
||||||
|
|
||||||
### v0.8.1
|
|
||||||
1. Fix proxy issue in new listener client [#3181](https://github.com/actions/actions-runner-controller/pull/3181)
|
|
||||||
|
|
||||||
### v0.8.0
|
|
||||||
1. Change listener container name [#3167](https://github.com/actions/actions-runner-controller/pull/3167)
|
|
||||||
1. Fix empty env and volumeMounts object on default setup [#3166](https://github.com/actions/actions-runner-controller/pull/3166)
|
|
||||||
1. Fix override listener pod spec [#3161](https://github.com/actions/actions-runner-controller/pull/3161)
|
|
||||||
1. Change minRunners behavior and fix the new listener min runners [#3139](https://github.com/actions/actions-runner-controller/pull/3139)
|
|
||||||
1. Update user agent for new ghalistener [#3138](https://github.com/actions/actions-runner-controller/pull/3138)
|
|
||||||
1. Bump golang.org/x/oauth2 from 0.14.0 to 0.15.0 [#3127](https://github.com/actions/actions-runner-controller/pull/3127)
|
|
||||||
1. Bump golang.org.x.net from 0.18.0 to 0.19.0 [#3126](https://github.com/actions/actions-runner-controller/pull/3126)
|
|
||||||
1. Bump k8s.io/client-go from 0.28.3 to 0.28.4 [#3125](https://github.com/actions/actions-runner-controller/pull/3125)
|
|
||||||
1. Modify user agent format with subsystem and is proxy configured information [#3116](https://github.com/actions/actions-runner-controller/pull/3116)
|
|
||||||
1. Record the error when the creation pod fails [#3112](https://github.com/actions/actions-runner-controller/pull/3112)
|
|
||||||
1. Fix typo in helm chart comment [#3104](https://github.com/actions/actions-runner-controller/pull/3104)
|
|
||||||
1. Set actions client timeout to 5 minutes, add logging to client [#3103](https://github.com/actions/actions-runner-controller/pull/3103)
|
|
||||||
1. Refactor listener app with configurable fallback [#3096](https://github.com/actions/actions-runner-controller/pull/3096)
|
|
||||||
1. Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 [#3094](https://github.com/actions/actions-runner-controller/pull/3094)
|
|
||||||
1. Bump k8s.io/api from 0.28.3 to 0.28.4 [#3093](https://github.com/actions/actions-runner-controller/pull/3093)
|
|
||||||
1. Bump k8s.io/apimachinery from 0.28.3 to 0.28.4 [#3092](https://github.com/actions/actions-runner-controller/pull/3092)
|
|
||||||
1. Bump github.com/gruntwork-io/terratest from 0.41.24 to 0.46.7 [#3091](https://github.com/actions/actions-runner-controller/pull/3091)
|
|
||||||
1. Record a reason for pod failure in EphemeralRunner [#3074](https://github.com/actions/actions-runner-controller/pull/3074)
|
|
||||||
1. ADR: Changing semantics of min runners to be min idle runners [#3040](https://github.com/actions/actions-runner-controller/pull/3040)
|
|
||||||
|
|
||||||
### v0.7.0
|
### v0.7.0
|
||||||
1. Add ResizePolicy and RestartPolicy on mergeListenerContainer [#3075](https://github.com/actions/actions-runner-controller/pull/3075)
|
1. Add ResizePolicy and RestartPolicy on mergeListenerContainer [#3075](https://github.com/actions/actions-runner-controller/pull/3075)
|
||||||
1. feat: GHA controller Helm Chart quoted labels [#3061](https://github.com/actions/actions-runner-controller/pull/3061)
|
1. feat: GHA controller Helm Chart quoted labels [#3061](https://github.com/actions/actions-runner-controller/pull/3061)
|
||||||
|
|||||||
@@ -12,4 +12,4 @@ We do not intend to provide a supported ARC dashboard. This is simply a referenc
|
|||||||
|
|
||||||
1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
|
1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
|
||||||
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners.
|
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners.
|
||||||
3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json) into Grafana.
|
3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json.json) into Grafana.
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Installing ARC
|
# Installing ARC
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
By default, actions-runner-controller uses [cert-manager](https://cert-manager.io/docs/installation/kubernetes/) for certificate management of Admission Webhook. Make sure you have already installed cert-manager before you install. The installation instructions for the cert-manager can be found below.
|
By default, actions-runner-controller uses [cert-manager](https://cert-manager.io/docs/installation/kubernetes/) for certificate management of Admission Webhook. Make sure you have already installed cert-manager before you install. The installation instructions for the cert-manager can be found below.
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Managing access with runner groups
|
# Managing access with runner groups
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Runner Groups
|
## Runner Groups
|
||||||
|
|
||||||
Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.
|
Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Monitoring and troubleshooting
|
# Monitoring and troubleshooting
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Metrics
|
## Metrics
|
||||||
|
|
||||||
The controller also exposes Prometheus metrics on a `/metrics` endpoint. By default this is on port `8443` behind an RBAC proxy.
|
The controller also exposes Prometheus metrics on a `/metrics` endpoint. By default this is on port `8443` behind an RBAC proxy.
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Actions Runner Controller Quickstart
|
# Actions Runner Controller Quickstart
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
GitHub Actions automates the deployment of code to different environments, including production. The environments contain the `GitHub Runner` software which executes the automation. `GitHub Runner` can be run in GitHub-hosted cloud or self-hosted environments. Self-hosted environments offer more control of hardware, operating system, and software tools. They can be run on physical machines, virtual machines, or in a container. Containerized environments are lightweight, loosely coupled, highly efficient and can be managed centrally. However, they are not straightforward to use.
|
GitHub Actions automates the deployment of code to different environments, including production. The environments contain the `GitHub Runner` software which executes the automation. `GitHub Runner` can be run in GitHub-hosted cloud or self-hosted environments. Self-hosted environments offer more control of hardware, operating system, and software tools. They can be run on physical machines, virtual machines, or in a container. Containerized environments are lightweight, loosely coupled, highly efficient and can be managed centrally. However, they are not straightforward to use.
|
||||||
|
|
||||||
`Actions Runner Controller (ARC)` makes it simpler to run self hosted environments on Kubernetes(K8s) cluster.
|
`Actions Runner Controller (ARC)` makes it simpler to run self hosted environments on Kubernetes(K8s) cluster.
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Using ARC across organizations
|
# Using ARC across organizations
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Multitenancy
|
## Multitenancy
|
||||||
|
|
||||||
> This feature requires controller version => [v0.26.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.26.0)
|
> This feature requires controller version => [v0.26.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.26.0)
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Using ARC runners in a workflow
|
# Using ARC runners in a workflow
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Runner Labels
|
## Runner Labels
|
||||||
|
|
||||||
To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow:
|
To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow:
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Using custom volumes
|
# Using custom volumes
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Custom Volume mounts
|
## Custom Volume mounts
|
||||||
|
|
||||||
You can configure your own custom volume mounts. For example to have the work/docker data in memory or on NVME SSD, for
|
You can configure your own custom volume mounts. For example to have the work/docker data in memory or on NVME SSD, for
|
||||||
|
|||||||
@@ -1,8 +1,5 @@
|
|||||||
# Using entrypoint features
|
# Using entrypoint features
|
||||||
|
|
||||||
> [!WARNING]
|
|
||||||
> This documentation covers the legacy mode of ARC (resources in the `actions.summerwind.net` namespace). If you're looking for documentation on the newer autoscaling runner scale sets, it is available in [GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller). To understand why these resources are considered legacy (and the benefits of using the newer autoscaling runner scale sets), read [this discussion (#2775)](https://github.com/actions/actions-runner-controller/discussions/2775).
|
|
||||||
|
|
||||||
## Runner Entrypoint Features
|
## Runner Entrypoint Features
|
||||||
|
|
||||||
> Environment variable values must all be strings
|
> Environment variable values must all be strings
|
||||||
|
|||||||
@@ -109,31 +109,23 @@ type ProxyFunc func(req *http.Request) (*url.URL, error)
|
|||||||
type ClientOption func(*Client)
|
type ClientOption func(*Client)
|
||||||
|
|
||||||
type UserAgentInfo struct {
|
type UserAgentInfo struct {
|
||||||
// Version is the version of the controller
|
Version string
|
||||||
Version string
|
CommitSHA string
|
||||||
// CommitSHA is the git commit SHA of the controller
|
|
||||||
CommitSHA string
|
|
||||||
// ScaleSetID is the ID of the scale set
|
|
||||||
ScaleSetID int
|
ScaleSetID int
|
||||||
// HasProxy is true if the controller is running behind a proxy
|
|
||||||
HasProxy bool
|
|
||||||
// Subsystem is the subsystem such as listener, controller, etc.
|
|
||||||
// Each system may pick its own subsystem name.
|
|
||||||
Subsystem string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u UserAgentInfo) String() string {
|
func (u UserAgentInfo) String() string {
|
||||||
scaleSetID := "NA"
|
var scaleSetID = "NA"
|
||||||
if u.ScaleSetID > 0 {
|
if u.ScaleSetID > 0 {
|
||||||
scaleSetID = strconv.Itoa(u.ScaleSetID)
|
scaleSetID = strconv.Itoa(u.ScaleSetID)
|
||||||
}
|
}
|
||||||
|
|
||||||
proxy := "Proxy/disabled"
|
return fmt.Sprintf(
|
||||||
if u.HasProxy {
|
"actions-runner-controller/%s CommitSHA/%s ScaleSetID/%s",
|
||||||
proxy = "Proxy/enabled"
|
u.Version,
|
||||||
}
|
u.CommitSHA,
|
||||||
|
scaleSetID,
|
||||||
return fmt.Sprintf("actions-runner-controller/%s (%s; %s) ScaleSetID/%s (%s)", u.Version, u.CommitSHA, u.Subsystem, scaleSetID, proxy)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func WithLogger(logger logr.Logger) ClientOption {
|
func WithLogger(logger logr.Logger) ClientOption {
|
||||||
@@ -640,11 +632,8 @@ func (c *Client) doSessionRequest(ctx context.Context, method, path string, requ
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode == expectedResponseStatusCode {
|
if resp.StatusCode == expectedResponseStatusCode && responseUnmarshalTarget != nil {
|
||||||
if responseUnmarshalTarget != nil {
|
return json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget)
|
||||||
return json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
|
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
// Code generated by mockery v2.33.2. DO NOT EDIT.
|
||||||
|
|
||||||
package actions
|
package actions
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Code generated by mockery v2.36.1. DO NOT EDIT.
|
// Code generated by mockery v2.33.2. DO NOT EDIT.
|
||||||
|
|
||||||
package actions
|
package actions
|
||||||
|
|
||||||
|
|||||||
34
github/actions/testdata/rootCA.crt
vendored
34
github/actions/testdata/rootCA.crt
vendored
@@ -1,20 +1,18 @@
|
|||||||
-----BEGIN CERTIFICATE-----
|
-----BEGIN CERTIFICATE-----
|
||||||
MIIDVTCCAj2gAwIBAgIUOo9VGKll71GYjunZhdMQhS5rP+gwDQYJKoZIhvcNAQEL
|
MIIC6jCCAdICCQCoZFduxPa/eDANBgkqhkiG9w0BAQsFADA2MQswCQYDVQQGEwJV
|
||||||
BQAwOTESMBAGA1UEAwwJbG9jYWxob3N0MQswCQYDVQQGEwJVUzEWMBQGA1UEBwwN
|
UzEnMCUGA1UEAwweYWN0aW9ucy1ydW5uZXItY29udHJvbGxlci10ZXN0MCAXDTIz
|
||||||
U2FuIEZyYW5zaXNjbzAgFw0yNDAxMjIxMjUyNTdaGA8yMDUxMDYwODEyNTI1N1ow
|
MDExOTE2NTAwMVoYDzIwNTAwNjA1MTY1MDAxWjA2MQswCQYDVQQGEwJVUzEnMCUG
|
||||||
OTESMBAGA1UEAwwJbG9jYWxob3N0MQswCQYDVQQGEwJVUzEWMBQGA1UEBwwNU2Fu
|
A1UEAwweYWN0aW9ucy1ydW5uZXItY29udHJvbGxlci10ZXN0MIIBIjANBgkqhkiG
|
||||||
IEZyYW5zaXNjbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALmyQRuC
|
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAykHCU0I/pdzhnQBwr2N+7so66LPq0cxc8JJL
|
||||||
S13Iat5jMun5zg8tn4E3RZ4x5KWPvRiR9RRX4zo5f/ytmnFVGkSnDhXJkuHRzwWl
|
S2mmk7gg+NWhTZzoci6aYXNRKCyH6B2Wmy7Qveku2wqT2+/4JBMYgTWH5bF7yt76
|
||||||
KjtdW23uUaBfNbJR55O0qUnZWAMNKO1Afm68Tfg+91a5X+KpwGiHfIGZs7UCERYg
|
LB+x9YruSgH/pBN2WI4vRU87NOAU8F0o0U/Lp5vAJoRo+ePPvcHu0OY1WF+QnEX+
|
||||||
6O2iqHQMLCOL/Ytpd6NBF+QFK9klRbfncBJmCR6FEpw1/bGr7HwlldfkPkpHNWUG
|
xtp6gJFGf5DT4U9upwEgQjKgvKFEoB5KNeH1qr2fS2yA2vhm6Uhm+1i/KUQUZ49K
|
||||||
cIqytYBvzo2T2cUyrTysKtATcRg/4Fp0DAZocYfzT6/gL2yWhLwnmxqU7Gbxvrd2
|
GvFK8TQQT4HXft8rPLP5M9OitdqVU8SX0dQoXZ4M41/qydycHOvApj0LlH/XsicZ
|
||||||
6ejFitgxwoM/3rKWuXds7tFMeiKUu2RovGkvDkMEieJWwTufPBJjkIklW5S4iMMi
|
x0mkF90hD+9VRqeYFe562NI4NHR7FGP7HKPWibNjXKC2w+z+aQIDAQABMA0GCSqG
|
||||||
hJnDIn+Ag1nbVHcCAwEAAaNTMFEwHQYDVR0OBBYEFK33e+IWho6FKn4GaxRb2cmv
|
SIb3DQEBCwUAA4IBAQBxaOCnmakd1PPp+pH40OjUktKG1nqM2tGqP0o3Bk7huB2y
|
||||||
mmxjMB8GA1UdIwQYMBaAFK33e+IWho6FKn4GaxRb2cmvmmxjMA8GA1UdEwEB/wQF
|
jXIDi9ETuTeqqHONwwgsKOVY3J+Zt5R+teBSC0qUnypODzu+9v8Xa4Is9G9GyT5S
|
||||||
MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHZ/Z3CSrPoWb02+iu1cUN8nlQBtAsxI
|
erjpPcJjQnvZyMHLH9DGGWE9UCyqKIqmaEc9bwr2oz1+a0rsaS3ZdIFlQibBHij5
|
||||||
oR3nqhUSEA/9oyyXJt8NIIXauACyYzmNXG87aKQZvVzUEQM0aK4MBq+Pg0Zdnvns
|
tdJcnzXfN4T4GIbYXKMCOYDy/5CiNJ26l/pQNpO9JCzsEmngw0ooS0Bi8EcTCgB6
|
||||||
8QtBvdro7jInHhfn4uS8X21Fa1gYZ0d0C6UHIXUeD9KSEOAX1JT+3VP/7FNIDzns
|
dsHl0w8va3l1kvxWWIlNTGwrAEpRbXmL01hAqx2yCiaFPVZ/eRNWmBWO4LpW4ouK
|
||||||
2ddSxzcji3eVFkDR4/1vRMTng/kiP5vFz1St1op2EYDT+v6PVr9ew3NWUf/w7fgP
|
YOaA+X7geM6XVFlZE3cP58AxYKWHGAThxkZbD5cu
|
||||||
sRRyx3qi7m8SRHc7FwDLk+6/zc1/14YIiX9PrvVmnJj0yULSHiBu4cQccKE2ibos
|
|
||||||
ZeUPfZL8Kl+hs/MtXG/XlYBbApm69eo7EEGHAS/2DIq2yPgsQrGMYkA=
|
|
||||||
-----END CERTIFICATE-----
|
-----END CERTIFICATE-----
|
||||||
|
|||||||
41
github/actions/testdata/server.crt
vendored
41
github/actions/testdata/server.crt
vendored
@@ -1,23 +1,22 @@
|
|||||||
-----BEGIN CERTIFICATE-----
|
-----BEGIN CERTIFICATE-----
|
||||||
MIIDyDCCArCgAwIBAgIUKCU/uCdz/9EcfzL6wd7ubSPrsxIwDQYJKoZIhvcNAQEL
|
MIIDnTCCAoWgAwIBAgIJAJskDVhiEY6fMA0GCSqGSIb3DQEBCwUAMDYxCzAJBgNV
|
||||||
BQAwOTESMBAGA1UEAwwJbG9jYWxob3N0MQswCQYDVQQGEwJVUzEWMBQGA1UEBwwN
|
BAYTAlVTMScwJQYDVQQDDB5hY3Rpb25zLXJ1bm5lci1jb250cm9sbGVyLXRlc3Qw
|
||||||
U2FuIEZyYW5zaXNjbzAgFw0yNDAxMjIxMjU0MTRaGA8yMDUxMDYwODEyNTQxNFow
|
HhcNMjMwMTE5MTY1MTE0WhcNMjQwMTE5MTY1MTE0WjBaMQswCQYDVQQGEwJVUzEi
|
||||||
gYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T
|
MCAGA1UECgwZYWN0aW9ucy1ydW5uZXItY29udHJvbGxlcjEnMCUGA1UECwweYWN0
|
||||||
YW4gRnJhbnNpc2NvMRMwEQYDVQQKDApHaXRIdWJUZXN0MSMwIQYDVQQLDBpHaXRI
|
aW9ucy1ydW5uZXItY29udHJvbGxlciB0ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOC
|
||||||
dWJUZXN0IEFjdGlvbnMgUnVudGltZTESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjAN
|
AQ8AMIIBCgKCAQEAzOTt1/VjuaHzn+b7jLeufW3rxLHFKQV+LiUiT389rbFGY+DN
|
||||||
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArVQ7yHHAxehcsOW8NNEplrEF/48n
|
CC+Nzx+DbFBpKcX/scseVhFzlXlrESWWZ4h7LGMXRsTDKs91F1RMuFCd8eIEwbuV
|
||||||
9+XCc4ZWu0LdPdKAjcwMSAddHvLZVp5OUNRTUKgwWfL5DyGFnAhSZ31Ag3FHyoOB
|
civR44IqT5r/0hlMOWemd3Fh/c8KF+9dWQ0q0T3tvlVzEbWNRTVAXTT4JzizqNd1
|
||||||
C5BQSBEd+xsO1Gflt8Pm0A7TN2jzlVx7rq1j7kZ25AZY9oJ6ipK4Hf4mYbfSR5cl
|
1hhnuV/KjhiptPC/8jQ4D9ocZKM8a1pM9O2z3bnmH7VTQJkhjxE7gefQTPQRmvKk
|
||||||
M2WKBPGk9JbYmI7l0t3IYLm954xxfNtPxr1tEAwk75UAKNWXBwqkR31+madOaFsU
|
C7uqvfk2NHTTnKiLfkE10JhLTa0VND2aofNWCybGTyHNNCNlepakoP3KyFC2LjPR
|
||||||
9LJT4aeFJoFs+95tQzvAymGwlE+w6aWiz0WecLSzf8ZgXcRqmQkh1EcP6/2cu5MA
|
oR5iwSnCRDu1z8tDWW+rIa3pfxdQ8LnH4J4CDwIDAQABo4GJMIGGMFAGA1UdIwRJ
|
||||||
CMRJcNly421DYUEbofgoZ8OetkqtFcYk+RyjUBhkQWi8AAQLKJ4q7VZKqwIDAQAB
|
MEehOqQ4MDYxCzAJBgNVBAYTAlVTMScwJQYDVQQDDB5hY3Rpb25zLXJ1bm5lci1j
|
||||||
o3YwdDAfBgNVHSMEGDAWgBSt93viFoaOhSp+BmsUW9nJr5psYzAJBgNVHRMEAjAA
|
b250cm9sbGVyLXRlc3SCCQCoZFduxPa/eDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE
|
||||||
MAsGA1UdDwQEAwIE8DAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwHQYDVR0O
|
8DAaBgNVHREEEzARhwR/AAABgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEB
|
||||||
BBYEFM4ELRkBcflqUtQ/GQK86CjBqjTUMA0GCSqGSIb3DQEBCwUAA4IBAQCMkiid
|
ALdl0ytjellmhtjbXkUZKAl/R2ZXMAVxIOtb4qiN6OOwOMK4p2Wt26p34bQa2JD0
|
||||||
7v2jsSWc8nGOM4Z6vEJ912mKpyyfpWSpM8SxCCxzUrbMrpFx8LB4rmeziy6hNEA0
|
t0qvesI7spQzQObNMdT6NZJl8Ul0ABuzti/Esvmby+VfsFPasCQVXx+jqGhERqXc
|
||||||
yv+h9qiu9l/vVzVc3Q9HA3linEPXqnlUEXd7PV/G/IFoYKFrXi/H+zda9G0Nqt1A
|
SeZFIVWVACyfAc1dkqfGwehSrY62eBlY2PJ1JezagW6aLAnV6Si+96++mkALJDdX
|
||||||
oOKM3t9fsff8KDaRQ2sdSUEjqtAlfg6bbBwO66CICXLU+VUH7hOVghT23UJVvwNY
|
MZhhSqjxM+Nnmhpy4My6oHVrdYWHcuVhzlEmNaMtmJCYuihIyD2Usn32xJK1k89d
|
||||||
Dvkha9TYR+aawRypLoTfT5ZtLp/0A9P+liqo6F5Xm0M89bYLXNPl1fPzY3Ihi5Jd
|
WgEOPCk+ZDAligPlGZS201fsznJk5uIjmxPjjFlJLXotBs8H7j0cQ2JkV5YHsHCk
|
||||||
b6/mttpY9gxTfbw67m2Epfmt1NdOHkY7ac/Hr6pt/YyMBrPz9Z3eZxIXUIVDo/Nh
|
EYf5EJ0ZKtZbwRFeRC1Ajxg=
|
||||||
4O2g9RoFFN4m3A+d
|
|
||||||
-----END CERTIFICATE-----
|
-----END CERTIFICATE-----
|
||||||
|
|||||||
55
github/actions/testdata/server.key
vendored
55
github/actions/testdata/server.key
vendored
@@ -1,28 +1,27 @@
|
|||||||
-----BEGIN PRIVATE KEY-----
|
-----BEGIN RSA PRIVATE KEY-----
|
||||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCtVDvIccDF6Fyw
|
MIIEowIBAAKCAQEAzOTt1/VjuaHzn+b7jLeufW3rxLHFKQV+LiUiT389rbFGY+DN
|
||||||
5bw00SmWsQX/jyf35cJzhla7Qt090oCNzAxIB10e8tlWnk5Q1FNQqDBZ8vkPIYWc
|
CC+Nzx+DbFBpKcX/scseVhFzlXlrESWWZ4h7LGMXRsTDKs91F1RMuFCd8eIEwbuV
|
||||||
CFJnfUCDcUfKg4ELkFBIER37Gw7UZ+W3w+bQDtM3aPOVXHuurWPuRnbkBlj2gnqK
|
civR44IqT5r/0hlMOWemd3Fh/c8KF+9dWQ0q0T3tvlVzEbWNRTVAXTT4JzizqNd1
|
||||||
krgd/iZht9JHlyUzZYoE8aT0ltiYjuXS3chgub3njHF820/GvW0QDCTvlQAo1ZcH
|
1hhnuV/KjhiptPC/8jQ4D9ocZKM8a1pM9O2z3bnmH7VTQJkhjxE7gefQTPQRmvKk
|
||||||
CqRHfX6Zp05oWxT0slPhp4UmgWz73m1DO8DKYbCUT7DppaLPRZ5wtLN/xmBdxGqZ
|
C7uqvfk2NHTTnKiLfkE10JhLTa0VND2aofNWCybGTyHNNCNlepakoP3KyFC2LjPR
|
||||||
CSHURw/r/Zy7kwAIxElw2XLjbUNhQRuh+Chnw562Sq0VxiT5HKNQGGRBaLwABAso
|
oR5iwSnCRDu1z8tDWW+rIa3pfxdQ8LnH4J4CDwIDAQABAoIBAC5rr3c+IVntV0Tj
|
||||||
nirtVkqrAgMBAAECggEAR+/t4ANWPs1xqvmuYz1sRV6zXp3LuNdjHQ9kb9QQftgf
|
EBrRgrboMIJfxEuG8w+BWkSoj1DK2SfHxqwUGgzTFvNzRGAye7vMSRM24Pj8iUVZ
|
||||||
ArrtXfewbmfcTFbnqiR1b8ReTPbK57zB90B88vbJD8S0RxjNNj9vEnoIN2/Dd+Sn
|
Pro2MbHcwWlHKvCID/85GiioGyCyFGHQHgu/4c2pr+xZMZxoHtzintRw28KlJaRG
|
||||||
Mt3brf55K0Yj0pnPu2+7Sel07q6zvZvpwBmk0M3qoCPq4kuY5Pv/jI2+KMVyn94A
|
lt+WHB1L6pE0yt04RMlpRyvW1GIODtEh1wya61Aa2xZMJxgbNWv89znLI2f3ForY
|
||||||
Dc3J6xdKqLNsw7nhUDELHn8DrKQgqucTzi4goJo8Lwc9I8lanTfmbiXj1wYo3nhr
|
QR/he8hQtfJQeH+mv2SvJ1bopkJ58ZObKapuJAWCSxzVRj/yol1MqfUDBy4NrJfY
|
||||||
5DgVcPUceZnsrDNnfkwOaaXKAGUCTi3PWieKq6Cm22oh53s1WS5NJDuk/1NvvfV+
|
F5UP0BSmnED1EdIXeC0duo5RyiSfHqqJlcKR+zlepOb4pr4I1H8P6AIJ9iiunxUJ
|
||||||
+6dyhfmW/jkHHMelox91n1qmLMYnq+GhoK6szapqAQKBgQDLRWZH17zdTNALQzks
|
h9i+YAECgYEA7JgrH5sjStcHdwUgEPN4E1MI3WRwDnnuVpzeSUH7tRqK4lsdIKmF
|
||||||
RbZU9abe+UQV1O5ywdL+4F444IPY2f3gxhEWyL+xAF66ZG0+NA/EO9n7FPqAbgyA
|
u/ss3TMwTgC8XR4JJbVp+6ea54zpVjtBGwShMSvn2+e7OHHg1YoVqBIgRpL+/C4m
|
||||||
Atz0LT7W6o9/AveqBSNs73zxGo7OYlBDq81nCgMzU11nvfTmydJhaMC+6Zyh0Bbc
|
wfon2EglQ0IjscUtKuAR/UyhU6vZtkYRUKeXRKisW4yoobdob0Y4lakCgYEA3bMl
|
||||||
vzIbygpDOL7tg4AyyEcLUNA7BwKBgQDaSnmwMCEdcTENwzVd1mOZdnXRTBPz0u0t
|
BfszC5c0RXI5vsBUBvr9gXMY/8tacM7H8i3vT7JqoYJG6GGST0/HeK+La3w2zryx
|
||||||
aCK5voL99L0+8HyKjtUBtWbBgUxCz7/+mfoNCU+QUHCJksm9vN1m5Zq4r0aEHE36
|
Q8IL6uLy/HZvTHZ+BSp4KzwwgDUIk0jm/JcvzD2ZhJHoAo4aQTc6QI2ZNgjGVwCb
|
||||||
7lYAAeWnltg+OHWqGcSHRZ/zHHs8c/azemvRaTZnZ++meVkfd07jsd+yIYt/G3La
|
nJ0Niaxc4CdSUEAUHH1bCXk/e2stcnieFuiiPPcCgYAIxrA60OdjPEyzloYU+uMG
|
||||||
KV9t86V2PQKBgEfNdfm+vVo2ve6cil+XKHcOZymwR1qm4qvqx4t82guhUzGQn1t8
|
XHskszgQ4Wb84X7BWug6VIy4Tsbq0j76tRt57Q8qpY5XKekO9AbFZfcyBaEWKMaG
|
||||||
26B+vSfbB5szylsErOUWd0N3/5zKQuQdHsuqB96G8LVe6PlH42GhnzLTvMoudEfT
|
eQp9p3JHTvY75sV/Rkr9XAbEd2lr805OvbfCpxJyxz5JttWxFHS2X6RQVTyTLVAx
|
||||||
MjVJliPVONNiiFXVyNjb1eoaP1fxV4IWj669Sa7BJsBjiS9nC6F1pHiVAoGBALBT
|
HLZYvqT+FF6g+QuvrPwmWQKBgAQspVvReQqU1EUie3feAzcGbtOLKUNXvuI04orq
|
||||||
fFxPZFBuAFvHlTIJXUa3I5A+zdckSCVnerVjKFiO+tb+VvttSK4qo6gnEzzcp4+3
|
1oC3qU5VN6SUgb7Aj87z7zoc4qNN5kCSXMsVbuHWEQ5thL3wKMcXoQoo9Xpgewjy
|
||||||
PP6OyNAfyee2xHMZPhZB3WrVWjaYznylTJ6Q6bsn4+DOpm0Sh2dlXEB6fylj2qE7
|
h9Herw9R9/5kUpY7xfsFL4dW7vUga82tH14iQrVtyBz+t+I5cgdhoxJd2EM5hjCE
|
||||||
gCAVxrZchH6Kgu0h6H2QTsuKwS2ZNHr49HbSWpNZAoGBAMrEMiyKYWKgiejs69pj
|
PNnNAoGBALPjmvEZ1HJdCOxY/AisziVtOFc6Glk/KhpSIT7WE1me4qLQFmrsHIDQ
|
||||||
idKifoCDI+Hu1WD/eViUm2OuOfdW9fIBHoeuKmOBKGYIqx5yEbFhXoJmTtJ1aSa1
|
kZ8Sb1f3PQ4T4vHGrtl8qh144MJPI1Nb8klzdlD1xeypGpgXoQb5fsC17g1fgczp
|
||||||
+N+0NBzv9+1W5EII0voELevxLvjeaejcUgLNabGIj1xIcPzaEKTS+Vv2Hn6nffWR
|
TGzq3pvnlGnrgVmnfrWQCHXDLzXtLqM/Pu84guPFftJQ+++yy0np
|
||||||
yKlIixoSTJ+oJShyT9DZyZAd
|
-----END RSA PRIVATE KEY-----
|
||||||
-----END PRIVATE KEY-----
|
|
||||||
|
|||||||
@@ -1,24 +0,0 @@
|
|||||||
package actions_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestUserAgentInfoString(t *testing.T) {
|
|
||||||
userAgentInfo := actions.UserAgentInfo{
|
|
||||||
Version: "0.1.0",
|
|
||||||
CommitSHA: "1234567890abcdef",
|
|
||||||
ScaleSetID: 10,
|
|
||||||
HasProxy: true,
|
|
||||||
Subsystem: "test",
|
|
||||||
}
|
|
||||||
|
|
||||||
userAgent := userAgentInfo.String()
|
|
||||||
expectedProduct := "actions-runner-controller/0.1.0 (1234567890abcdef; test)"
|
|
||||||
assert.Contains(t, userAgent, expectedProduct)
|
|
||||||
expectedScaleSet := "ScaleSetID/10 (Proxy/enabled)"
|
|
||||||
assert.Contains(t, userAgent, expectedScaleSet)
|
|
||||||
}
|
|
||||||
14
go.mod
14
go.mod
@@ -13,7 +13,7 @@ require (
|
|||||||
github.com/google/uuid v1.4.0
|
github.com/google/uuid v1.4.0
|
||||||
github.com/gorilla/mux v1.8.1
|
github.com/gorilla/mux v1.8.1
|
||||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
||||||
github.com/gruntwork-io/terratest v0.46.7
|
github.com/gruntwork-io/terratest v0.41.24
|
||||||
github.com/hashicorp/go-retryablehttp v0.7.5
|
github.com/hashicorp/go-retryablehttp v0.7.5
|
||||||
github.com/kelseyhightower/envconfig v1.4.0
|
github.com/kelseyhightower/envconfig v1.4.0
|
||||||
github.com/onsi/ginkgo v1.16.5
|
github.com/onsi/ginkgo v1.16.5
|
||||||
@@ -25,14 +25,14 @@ require (
|
|||||||
github.com/teambition/rrule-go v1.8.2
|
github.com/teambition/rrule-go v1.8.2
|
||||||
go.uber.org/multierr v1.11.0
|
go.uber.org/multierr v1.11.0
|
||||||
go.uber.org/zap v1.26.0
|
go.uber.org/zap v1.26.0
|
||||||
golang.org/x/net v0.19.0
|
golang.org/x/net v0.18.0
|
||||||
golang.org/x/oauth2 v0.15.0
|
golang.org/x/oauth2 v0.14.0
|
||||||
golang.org/x/sync v0.5.0
|
golang.org/x/sync v0.5.0
|
||||||
gomodules.xyz/jsonpatch/v2 v2.4.0
|
gomodules.xyz/jsonpatch/v2 v2.4.0
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
k8s.io/api v0.28.4
|
k8s.io/api v0.28.4
|
||||||
k8s.io/apimachinery v0.28.4
|
k8s.io/apimachinery v0.28.4
|
||||||
k8s.io/client-go v0.28.4
|
k8s.io/client-go v0.28.3
|
||||||
sigs.k8s.io/controller-runtime v0.16.3
|
sigs.k8s.io/controller-runtime v0.16.3
|
||||||
sigs.k8s.io/yaml v1.4.0
|
sigs.k8s.io/yaml v1.4.0
|
||||||
)
|
)
|
||||||
@@ -89,10 +89,10 @@ require (
|
|||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/stretchr/objx v0.5.1 // indirect
|
github.com/stretchr/objx v0.5.1 // indirect
|
||||||
github.com/urfave/cli v1.22.2 // indirect
|
github.com/urfave/cli v1.22.2 // indirect
|
||||||
golang.org/x/crypto v0.16.0 // indirect
|
golang.org/x/crypto v0.15.0 // indirect
|
||||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
|
||||||
golang.org/x/sys v0.15.0 // indirect
|
golang.org/x/sys v0.14.0 // indirect
|
||||||
golang.org/x/term v0.15.0 // indirect
|
golang.org/x/term v0.14.0 // indirect
|
||||||
golang.org/x/text v0.14.0 // indirect
|
golang.org/x/text v0.14.0 // indirect
|
||||||
golang.org/x/time v0.4.0 // indirect
|
golang.org/x/time v0.4.0 // indirect
|
||||||
golang.org/x/tools v0.15.0 // indirect
|
golang.org/x/tools v0.15.0 // indirect
|
||||||
|
|||||||
28
go.sum
28
go.sum
@@ -106,8 +106,8 @@ github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWS
|
|||||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro=
|
github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro=
|
||||||
github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78=
|
github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78=
|
||||||
github.com/gruntwork-io/terratest v0.46.7 h1:oqGPBBO87SEsvBYaA0R5xOq+Lm2Xc5dmFVfxEolfZeU=
|
github.com/gruntwork-io/terratest v0.41.24 h1:j6T6qe4deVvynTG2UmnjGwZy83he6xKgTaYWiSdFv/w=
|
||||||
github.com/gruntwork-io/terratest v0.46.7/go.mod h1:6gI5MlLeyF+SLwqocA5GBzcTix+XiuxCy1BPwKuT+WM=
|
github.com/gruntwork-io/terratest v0.41.24/go.mod h1:O6gajNBjO1wvc7Wl9WtbO+ORcdnhAV2GQiBE71ycwIk=
|
||||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||||
@@ -233,8 +233,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
|||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||||
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
|
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
|
||||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
|
||||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
|
||||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
@@ -255,10 +255,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
|||||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
|
||||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
|
||||||
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
|
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
|
||||||
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
|
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@@ -288,15 +288,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
|
||||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
@@ -357,8 +357,8 @@ k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2E
|
|||||||
k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc=
|
k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc=
|
||||||
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
|
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
|
||||||
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
|
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
|
||||||
k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY=
|
k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4=
|
||||||
k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4=
|
k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo=
|
||||||
k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI=
|
k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI=
|
||||||
k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8=
|
k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8=
|
||||||
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
|
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
|
||||||
|
|||||||
92
hack/e2e-test.sh
Executable file
92
hack/e2e-test.sh
Executable file
@@ -0,0 +1,92 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
TEST_DIR="$(realpath "${DIR}/../test/actions.github.com")"
|
||||||
|
|
||||||
|
export PLATFORMS="linux/amd64"
|
||||||
|
|
||||||
|
TARGETS=()
|
||||||
|
|
||||||
|
function set_targets() {
|
||||||
|
local cases="$(find "${TEST_DIR}" -name '*.test.sh' | sed "s#^${TEST_DIR}/##g" )"
|
||||||
|
|
||||||
|
mapfile -t TARGETS < <(echo "${cases}")
|
||||||
|
|
||||||
|
echo $TARGETS
|
||||||
|
}
|
||||||
|
|
||||||
|
function env_test() {
|
||||||
|
if [[ -z "${GITHUB_TOKEN}" ]]; then
|
||||||
|
echo "Error: GITHUB_TOKEN is not set"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -z "${TARGET_ORG}" ]]; then
|
||||||
|
echo "Error: TARGET_ORG is not set"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -z "${TARGET_REPO}" ]]; then
|
||||||
|
echo "Error: TARGET_REPO is not set"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function usage() {
|
||||||
|
echo "Usage: $0 [test_name]"
|
||||||
|
echo " test_name: the name of the test to run"
|
||||||
|
echo " if not specified, all tests will be run"
|
||||||
|
echo " test_name should be the name of the test file without the .test.sh suffix"
|
||||||
|
echo ""
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
env_test
|
||||||
|
|
||||||
|
if [[ -z "${1}" ]]; then
|
||||||
|
echo "Running all tests"
|
||||||
|
set_targets
|
||||||
|
elif [[ -f "${TEST_DIR}/${1}.test.sh" ]]; then
|
||||||
|
echo "Running test ${1}"
|
||||||
|
TARGETS=("${1}.test.sh")
|
||||||
|
else
|
||||||
|
usage
|
||||||
|
fi
|
||||||
|
|
||||||
|
for target in "${TARGETS[@]}"; do
|
||||||
|
echo "============================================================"
|
||||||
|
test="${TEST_DIR}/${target}"
|
||||||
|
if [[ ! -x "${test}" ]]; then
|
||||||
|
echo "Error: test ${test} is not executable or not found"
|
||||||
|
failed+=("${test}")
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Running test ${target}"
|
||||||
|
if ! "${test}"; then
|
||||||
|
failed+=("${target}")
|
||||||
|
echo "---------------------------------"
|
||||||
|
echo "FAILED: ${target}"
|
||||||
|
else
|
||||||
|
echo "---------------------------------"
|
||||||
|
echo "PASSED: ${target}"
|
||||||
|
fi
|
||||||
|
echo "============================================================"
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ "${#failed[@]}" -gt 0 ]]; then
|
||||||
|
echo "Failed tests:"
|
||||||
|
for fail in "${failed[@]}"; do
|
||||||
|
echo " ${fail}"
|
||||||
|
done
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
main $@
|
||||||
2
main.go
2
main.go
@@ -203,8 +203,6 @@ func main() {
|
|||||||
log.Info("Using default AutoscalingListener logging parameters", "LogLevel", actionsgithubcom.DefaultScaleSetListenerLogLevel, "LogFormat", actionsgithubcom.DefaultScaleSetListenerLogFormat)
|
log.Info("Using default AutoscalingListener logging parameters", "LogLevel", actionsgithubcom.DefaultScaleSetListenerLogLevel, "LogFormat", actionsgithubcom.DefaultScaleSetListenerLogFormat)
|
||||||
}
|
}
|
||||||
|
|
||||||
actionsgithubcom.SetListenerEntrypoint(os.Getenv("LISTENER_ENTRYPOINT"))
|
|
||||||
|
|
||||||
var webhookServer webhook.Server
|
var webhookServer webhook.Server
|
||||||
if port != 0 {
|
if port != 0 {
|
||||||
webhookServer = webhook.NewServer(webhook.Options{
|
webhookServer = webhook.NewServer(webhook.Options{
|
||||||
|
|||||||
@@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
|
|||||||
OS_IMAGE ?= ubuntu-22.04
|
OS_IMAGE ?= ubuntu-22.04
|
||||||
TARGETPLATFORM ?= $(shell arch)
|
TARGETPLATFORM ?= $(shell arch)
|
||||||
|
|
||||||
RUNNER_VERSION ?= 2.312.0
|
RUNNER_VERSION ?= 2.311.0
|
||||||
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.5.1
|
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.5.0
|
||||||
DOCKER_VERSION ?= 24.0.7
|
DOCKER_VERSION ?= 24.0.7
|
||||||
|
|
||||||
# default list of platforms for which multiarch image is built
|
# default list of platforms for which multiarch image is built
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
RUNNER_VERSION=2.312.0
|
RUNNER_VERSION=2.311.0
|
||||||
RUNNER_CONTAINER_HOOKS_VERSION=0.5.1
|
RUNNER_CONTAINER_HOOKS_VERSION=0.5.0
|
||||||
84
test/actions.github.com/anonymous-proxy-setup.test.sh
Executable file
84
test/actions.github.com/anonymous-proxy-setup.test.sh
Executable file
@@ -0,0 +1,84 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(ralpath "${DIR}/../../")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="anonymous-proxy-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-workflow.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Creating namespace ${ARC_NAMESPACE}"
|
||||||
|
kubectl create namespace "${SCALE_SET_NAMESPACE}"
|
||||||
|
|
||||||
|
echo "Installing ARC"
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_TAG}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
--set proxy.https.url="http://host.minikube.internal:3128" \
|
||||||
|
--set "proxy.noProxy[0]=10.96.0.1:443" \
|
||||||
|
"${ROOT_DIR}/charts/gha-runner-scale-set" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function run_squid() {
|
||||||
|
echo "Running squid"
|
||||||
|
docker run -d \
|
||||||
|
--name squid \
|
||||||
|
--publish 3128:3128 \
|
||||||
|
ubuntu/squid:latest
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
|
||||||
|
install_arc
|
||||||
|
run_squid
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_failed_tests "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
91
test/actions.github.com/auth-proxy-setup.test.sh
Executable file
91
test/actions.github.com/auth-proxy-setup.test.sh
Executable file
@@ -0,0 +1,91 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(ralpath "${DIR}/../../")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="auth-proxy-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-workflow.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Creating namespace ${ARC_NAMESPACE}"
|
||||||
|
kubectl create namespace "${SCALE_SET_NAMESPACE}"
|
||||||
|
|
||||||
|
echo "Installing ARC"
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_TAG}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Creating namespace ${SCALE_SET_NAMESPACE}"
|
||||||
|
kubectl create namespace "${SCALE_SET_NAMESPACE}"
|
||||||
|
|
||||||
|
echo "Installing proxy secret"
|
||||||
|
kubectl create secret generic proxy-auth \
|
||||||
|
--namespace="${SCALE_SET_NAMESPACE}" \
|
||||||
|
--from-literal=username=github \
|
||||||
|
--from-literal=password='actions'
|
||||||
|
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
--set proxy.https.url="http://host.minikube.internal:3128" \
|
||||||
|
--set proxy.https.credentialSecretRef="proxy-auth" \
|
||||||
|
--set "proxy.noProxy[0]=10.96.0.1:443" \
|
||||||
|
"${ROOT_DIR}/charts/gha-runner-scale-set" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function run_squid() {
|
||||||
|
echo "Running squid"
|
||||||
|
docker run -d \
|
||||||
|
--name squid \
|
||||||
|
--publish 3128:3128 \
|
||||||
|
huangtingluo/squid-proxy:latest
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
install_arc
|
||||||
|
run_squid
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_results "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
73
test/actions.github.com/default-setup.test.sh
Executable file
73
test/actions.github.com/default-setup.test.sh
Executable file
@@ -0,0 +1,73 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(realpath "${DIR}/../..")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="default-$(date +'%M%S')$(((${RANDOM} + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-workflow.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Creating namespace ${ARC_NAMESPACE}"
|
||||||
|
kubectl create namespace "${SCALE_SET_NAMESPACE}"
|
||||||
|
|
||||||
|
echo "Installing ARC"
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_TAG}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set \
|
||||||
|
--version="${VERSION}" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
|
||||||
|
install_arc
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_results "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
70
test/actions.github.com/dind-setup.test.sh
Executable file
70
test/actions.github.com/dind-setup.test.sh
Executable file
@@ -0,0 +1,70 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BAASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(realpath "${DIR}/../../")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="dind-$(date +'%M%S')$(((${RANDOM} + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-dind-workflow.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Installing ARC"
|
||||||
|
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_TAG}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
|
||||||
|
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
--set containerMode.type="dind" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${}"
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
|
||||||
|
install_arc
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" arc_logs
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_results "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
3
test/actions.github.com/envrc.example
Normal file
3
test/actions.github.com/envrc.example
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
export TARGET_ORG="org"
|
||||||
|
export TARGET_REPO="repo"
|
||||||
|
export GITHUB_TOKEN="token"
|
||||||
165
test/actions.github.com/helper.sh
Normal file
165
test/actions.github.com/helper.sh
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(realpath "${DIR}/../..")"
|
||||||
|
|
||||||
|
export TARGET_ORG="${TARGET_ORG:-actions-runner-controller}"
|
||||||
|
export TARGET_REPO="${TARGET_REPO:-arc_e2e_test_dummy}"
|
||||||
|
export IMAGE_NAME="${IMAGE_NAME:-arc-test-image}"
|
||||||
|
export VERSION="${VERSION:-$(yq .version < "${ROOT_DIR}/charts/gha-runner-scale-set-controller/Chart.yaml")}"
|
||||||
|
export IMAGE_VERSION="${IMAGE_VERSION:-${VERSION}}"
|
||||||
|
|
||||||
|
function build_image() {
|
||||||
|
echo "Building ARC image ${IMAGE_NAME}:${IMAGE_VERSION}"
|
||||||
|
|
||||||
|
cd ${ROOT_DIR}
|
||||||
|
|
||||||
|
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||||
|
export DOCKER_BUILDKIT=1
|
||||||
|
docker buildx build --platform ${PLATFORMS} \
|
||||||
|
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
||||||
|
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
||||||
|
--build-arg VERSION=${VERSION} \
|
||||||
|
--build-arg COMMIT_SHA=${COMMIT_SHA} \
|
||||||
|
-t "${IMAGE_NAME}:${IMAGE_VERSION}" \
|
||||||
|
-f Dockerfile \
|
||||||
|
. --load
|
||||||
|
|
||||||
|
echo "Created image ${IMAGE_NAME}:${IMAGE_VERSION}"
|
||||||
|
cd -
|
||||||
|
}
|
||||||
|
|
||||||
|
function create_cluster() {
|
||||||
|
echo "Deleting minikube cluster if exists"
|
||||||
|
minikube delete || true
|
||||||
|
|
||||||
|
echo "Creating minikube cluster"
|
||||||
|
minikube start
|
||||||
|
|
||||||
|
echo "Loading image into minikube cluster"
|
||||||
|
minikube image load "${IMAGE_NAME}:${IMAGE_VERSION}"
|
||||||
|
}
|
||||||
|
|
||||||
|
function delete_cluster() {
|
||||||
|
echo "Deleting minikube cluster"
|
||||||
|
minikube delete
|
||||||
|
}
|
||||||
|
|
||||||
|
function log_arc() {
|
||||||
|
echo "ARC logs"
|
||||||
|
kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
|
||||||
|
}
|
||||||
|
|
||||||
|
function wait_for_arc() {
|
||||||
|
echo "Waiting for ARC to be ready"
|
||||||
|
local count=0;
|
||||||
|
while true; do
|
||||||
|
POD_NAME=$(kubectl get pods -n ${NAMESPACE} -l app.kubernetes.io/name=gha-rs-controller -o name)
|
||||||
|
if [ -n "$POD_NAME" ]; then
|
||||||
|
echo "Pod found: $POD_NAME"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
if [ "$count" -ge 60 ]; then
|
||||||
|
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
|
||||||
|
kubectl wait --timeout=30s --for=condition=ready pod -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
|
||||||
|
kubectl get pod -n "${NAMESPACE}"
|
||||||
|
kubectl describe deployment "${NAME}" -n "${NAMESPACE}"
|
||||||
|
}
|
||||||
|
|
||||||
|
function wait_for_scale_set() {
|
||||||
|
local count=0
|
||||||
|
while true; do
|
||||||
|
POD_NAME=$(kubectl get pods -n ${NAMESPACE} -l actions.github.com/scale-set-name=${NAME} -o name)
|
||||||
|
if [ -n "$POD_NAME" ]; then
|
||||||
|
echo "Pod found: ${POD_NAME}"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$count" -ge 60 ]; then
|
||||||
|
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=${NAME}"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
kubectl wait --timeout=30s --for=condition=ready pod -n ${NAMESPACE} -l actions.github.com/scale-set-name=${NAME}
|
||||||
|
kubectl get pod -n ${NAMESPACE}
|
||||||
|
}
|
||||||
|
|
||||||
|
function cleanup_scale_set() {
|
||||||
|
helm uninstall "${INSTALLATION_NAME}" --namespace "${NAMESPACE}" --debug
|
||||||
|
|
||||||
|
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${NAMESPACE}" -l app.kubernetes.io/instance="${INSTALLATION_NAME}"
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_openebs() {
|
||||||
|
echo "Install openebs/dynamic-localpv-provisioner"
|
||||||
|
helm repo add openebs https://openebs.github.io/charts
|
||||||
|
helm repo update
|
||||||
|
helm install openebs openebs/openebs --namespace openebs --create-namespace
|
||||||
|
}
|
||||||
|
|
||||||
|
function print_results() {
|
||||||
|
local failed=("$@")
|
||||||
|
|
||||||
|
if [[ "${#failed[@]}" -ne 0 ]]; then
|
||||||
|
echo "----------------------------------"
|
||||||
|
echo "The following tests failed:"
|
||||||
|
for test in "${failed[@]}"; do
|
||||||
|
echo " - ${test}"
|
||||||
|
done
|
||||||
|
return 1
|
||||||
|
else
|
||||||
|
echo "----------------------------------"
|
||||||
|
echo "All tests passed!"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function run_workflow() {
|
||||||
|
echo "Checking if the workflow file exists"
|
||||||
|
gh workflow view -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" &> /dev/null || return 1
|
||||||
|
|
||||||
|
local queue_time="$(date -u +%FT%TZ)"
|
||||||
|
|
||||||
|
echo "Running workflow ${workflow_file}"
|
||||||
|
gh workflow run -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" --ref main -f arc_name="${SCALE_SET_NAME}" || return 1
|
||||||
|
|
||||||
|
echo "Waiting for run to start"
|
||||||
|
local count=0
|
||||||
|
local run_id=
|
||||||
|
while true; do
|
||||||
|
if [[ "${count}" -ge 12 ]]; then
|
||||||
|
echo "Timeout waiting for run to start"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
run_id=$(gh run list -R "${TARGET_ORG}/${TARGET_REPO}" --workflow "${WORKFLOW_FILE}" --created ">${queue_time}" --json "name,databaseId" --jq ".[] | select(.name | contains(\"${SCALE_SET_NAME}\")) | .databaseId")
|
||||||
|
echo "Run ID: ${run_id}"
|
||||||
|
if [ -n "$run_id" ]; then
|
||||||
|
echo "Run found!"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Run not found yet, waiting 5 seconds"
|
||||||
|
sleep 5
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "Waiting for run to complete"
|
||||||
|
local code=$(gh run watch "${run_id}" -R "${TARGET_ORG}/${TARGET_REPO}" --exit-status &> /dev/null)
|
||||||
|
if [[ "${code}" -ne 0 ]]; then
|
||||||
|
echo "Run failed with exit code ${code}"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Run completed successfully"
|
||||||
|
}
|
||||||
74
test/actions.github.com/kubernetes-mode-setup.test.sh
Executable file
74
test/actions.github.com/kubernetes-mode-setup.test.sh
Executable file
@@ -0,0 +1,74 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(ralpath "${DIR}/../../")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="kube-mode-$(date '%M%S')$((($RANDOM + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-kubernetes-workflow.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Installing ARC"
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_VERSION}" \
|
||||||
|
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
-- githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
--set containerMode.type="kubernetes" \
|
||||||
|
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
|
||||||
|
--set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
|
||||||
|
--set containerModde.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
|
||||||
|
"${ROOT_DIR}/charts/gha-runner-scale-set" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
install_openebs
|
||||||
|
install_arc
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" arc_logs
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_results "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
135
test/actions.github.com/self-signed-ca-setup.test.sh
Executable file
135
test/actions.github.com/self-signed-ca-setup.test.sh
Executable file
@@ -0,0 +1,135 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(ralpath "${DIR}/../../")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="self-signed-crt-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-workflow.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Creating namespace ${ARC_NAMESPACE}"
|
||||||
|
kubectl create namespace "${SCALE_SET_NAMESPACE}"
|
||||||
|
|
||||||
|
echo "Installing ARC"
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_TAG}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Creating namespace ${SCALE_SET_NAMESPACE}"
|
||||||
|
kubectl create namespace "${SCALE_SET_NAMESPACE}"
|
||||||
|
|
||||||
|
echo "Installing ca-cert config map"
|
||||||
|
kubectl -n "${SCALE_SET_NAMESPACE}" create configmap ca-cert \
|
||||||
|
--from-file="${DIR}/mitmproxy/mitmproxy-ca-cert.pem"
|
||||||
|
|
||||||
|
echo "Config map:"
|
||||||
|
kubectl -n "${SCALE_SET_NAMESPACE}" get configmap ca-cert -o yaml
|
||||||
|
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
--set proxy.https.url="http://host.minikube.internal:3128" \
|
||||||
|
--set "proxy.noProxy[0]=10.96.0.1:443" \
|
||||||
|
--set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert"
|
||||||
|
--set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.pem"
|
||||||
|
--set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
|
||||||
|
"${ROOT_DIR}/charts/gha-runner-scale-set" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function wait_for_mitmproxy_cert() {
|
||||||
|
echo "Waiting for mitmproxy generated CA certificate"
|
||||||
|
local count=0
|
||||||
|
while true; do
|
||||||
|
if [ -f "./mitmproxy/mitmproxy-ca-cert.pem" ]; then
|
||||||
|
echo "CA certificate is generated"
|
||||||
|
echo "CA certificate:"
|
||||||
|
cat "./mitmproxy/mitmproxy-ca-cert.pem"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${count}" -ge 60 ]; then
|
||||||
|
echo "Timeout waiting for mitmproxy generated CA certificate"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
sleep 1
|
||||||
|
count=$((count + 1))
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
function run_mitmproxy() {
|
||||||
|
echo "Running mitmproxy"
|
||||||
|
docker run -d \
|
||||||
|
--rm \
|
||||||
|
--name mitmproxy \
|
||||||
|
--publish 8080:8080 \
|
||||||
|
-b ./mitmproxy:/home/mitmproxy/.mitmproxy \
|
||||||
|
mitmproxy/mitmproxy:latest \
|
||||||
|
|
||||||
|
echo "Mitm dump:"
|
||||||
|
mitmdump
|
||||||
|
|
||||||
|
if ! wait_for_mitmproxy_cert; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "CA certificate is generated"
|
||||||
|
|
||||||
|
sudo cp ./mitmproxy/mitmproxy-ca-cert.pem /usr/local/share/ca-certificates/mitmproxy-ca-cert.crt
|
||||||
|
sudo chown runner ./mitmproxy/mitmproxy-ca-cert.crt
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
if [[ ! -x "$(which mitmdump)" ]]; then
|
||||||
|
echo "mitmdump is not installed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
install_arc
|
||||||
|
run_mitmproxy
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" arc_logs
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_results "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
73
test/actions.github.com/single-namespace-setup.test.sh
Executable file
73
test/actions.github.com/single-namespace-setup.test.sh
Executable file
@@ -0,0 +1,73 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(ralpath "${DIR}/../../")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="single-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-workflow.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Creating namespace ${ARC_NAMESPACE}"
|
||||||
|
kubectl create namespace "${SCALE_SET_NAMESPACE}"
|
||||||
|
|
||||||
|
echo "Installing ARC"
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_TAG}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set \
|
||||||
|
--version="${VERSION}" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
install_arc
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" arc_logs
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_results "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
145
test/actions.github.com/update-strategy.test.sh
Executable file
145
test/actions.github.com/update-strategy.test.sh
Executable file
@@ -0,0 +1,145 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
DIR="$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
DIR="$(realpath "${DIR}")"
|
||||||
|
|
||||||
|
ROOT_DIR="$(ralpath "${DIR}/../../")"
|
||||||
|
|
||||||
|
source "${DIR}/helper.sh"
|
||||||
|
|
||||||
|
SCALE_SET_NAME="update-strategy-$(date '+%M%S')$((($RANDOM + 100) % 100 + 1))"
|
||||||
|
SCALE_SET_NAMESPACE="arc-runners"
|
||||||
|
WORKFLOW_FILE="arc-test-sleepy-matrix.yaml"
|
||||||
|
ARC_NAME="arc"
|
||||||
|
ARC_NAMESPACE="arc-systems"
|
||||||
|
|
||||||
|
function install_arc() {
|
||||||
|
echo "Installing ARC"
|
||||||
|
|
||||||
|
helm install "${ARC_NAME}" \
|
||||||
|
--namespace "${ARC_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set image.repository="${IMAGE_NAME}" \
|
||||||
|
--set image.tag="${IMAGE_TAG}" \
|
||||||
|
--set flags.updateStrategy="eventual" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set-controller \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function install_scale_set() {
|
||||||
|
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
|
||||||
|
helm install "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--create-namespace \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
"${ROOT_DIR}/charts/gha-runner-scale-set" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" log_arc
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function upgrade_scale_set() {
|
||||||
|
echo "Upgrading scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
|
||||||
|
helm upgrade "${SCALE_SET_NAME}" \
|
||||||
|
--namespace "${SCALE_SET_NAMESPACE}" \
|
||||||
|
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
|
||||||
|
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
|
||||||
|
--set template.spec.containers[0].name="runner" \
|
||||||
|
--set template.spec.containers[0].image="ghcr.io/actions/actions-runner:latest" \
|
||||||
|
--set template.spec.containers[0].command={"/home/runner/run.sh"} \
|
||||||
|
--set template.spec.containers[0].env[0].name="TEST" \
|
||||||
|
--set template.spec.containers[0].env[0].value="E2E TESTS" \
|
||||||
|
${ROOT_DIR}/charts/gha-runner-scale-set \
|
||||||
|
--version="${VERSION}" \
|
||||||
|
--debug
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
function assert_listener_deleted() {
|
||||||
|
local count=0
|
||||||
|
while true; do
|
||||||
|
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RESOURCES="$(kubectl get pods -A)"
|
||||||
|
|
||||||
|
if [ "${LISTENER_COUNT}" -eq 0 ]; then
|
||||||
|
echo "Listener has been deleted"
|
||||||
|
echo "${RESOURCES}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
if [ "${count}" -ge 60 ]; then
|
||||||
|
echo "Timeout waiting for listener to be deleted"
|
||||||
|
echo "${RESOURCES}"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Waiting for listener to be deleted"
|
||||||
|
echo "Listener count: ${LISTENER_COUNT} target: 0 | Runners count: ${RUNNERS_COUNT} target: 3"
|
||||||
|
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
function assert_listener_recreated() {
|
||||||
|
count=0
|
||||||
|
while true; do
|
||||||
|
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
|
||||||
|
RESOURCES="$(kubectl get pods -A)"
|
||||||
|
|
||||||
|
if [ "${LISTENER_COUNT}" -eq 1 ]; then
|
||||||
|
echo "Listener is up!"
|
||||||
|
echo "${RESOURCES}"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
if [ "${count}" -ge 120 ]; then
|
||||||
|
echo "Timeout waiting for listener to be recreated"
|
||||||
|
echo "${RESOURCES}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Waiting for listener to be recreated"
|
||||||
|
echo "Listener count: ${LISTENER_COUNT} target: 1 | Runners count: ${RUNNERS_COUNT} target: 0"
|
||||||
|
|
||||||
|
sleep 1
|
||||||
|
count=$((count+1))
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
function main() {
|
||||||
|
local failed=()
|
||||||
|
|
||||||
|
build_image
|
||||||
|
create_cluster
|
||||||
|
install_arc
|
||||||
|
install_scale_set
|
||||||
|
|
||||||
|
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
|
||||||
|
|
||||||
|
upgrade_scale_set || failed+=("upgrade_scale_set")
|
||||||
|
assert_listener_deleted || failed+=("assert_listener_deleted")
|
||||||
|
assert_listener_recreated || failed+=("assert_listener_recreated")
|
||||||
|
|
||||||
|
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
|
||||||
|
|
||||||
|
NAMESPACE="${ARC_NAMESPACE}" arc_logs
|
||||||
|
|
||||||
|
delete_cluster
|
||||||
|
|
||||||
|
print_results "${failed[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
@@ -36,8 +36,8 @@ var (
|
|||||||
|
|
||||||
testResultCMNamePrefix = "test-result-"
|
testResultCMNamePrefix = "test-result-"
|
||||||
|
|
||||||
RunnerVersion = "2.312.0"
|
RunnerVersion = "2.311.0"
|
||||||
RunnerContainerHooksVersion = "0.5.1"
|
RunnerContainerHooksVersion = "0.5.0"
|
||||||
)
|
)
|
||||||
|
|
||||||
// If you're willing to run this test via VS Code "run test" or "debug test",
|
// If you're willing to run this test via VS Code "run test" or "debug test",
|
||||||
|
|||||||
Reference in New Issue
Block a user