Compare commits

1 Commit

Author	SHA1	Message	Date
Bassem Dghaidi	09b542320d	Revert docker socket path to /var/run/docker.sock	2024-01-20 20:12:31 +00:00
28 changed files with 213 additions and 803 deletions

View File

@@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.8.3"
IMAGE_VERSION: "0.8.1"
concurrency:
# This will make sure we only apply the concurrency limits on pull requests

View File

@@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.313.0
+RUNNER_VERSION ?= 2.311.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.3
+version: 0.8.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.8.3"
appVersion: "0.8.1"
home: https://github.com/actions/actions-runner-controller

View File

@@ -110,16 +110,10 @@ spec:
volumeMounts:
- mountPath: /tmp
name: tmp
-{{- range .Values.volumeMounts }}
-- {{ toYaml . | nindent 10 }}
-{{- end }}
terminationGracePeriodSeconds: 10
volumes:
- name: tmp
emptyDir: {}
-{{- range .Values.volumes }}
-- {{ toYaml . | nindent 8 }}
-{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}

View File

@@ -424,14 +424,10 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"tolerations[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
"priorityClassName": "test-priority-class",
"flags.updateStrategy": "eventual",
"flags.logLevel": "info",
"flags.logFormat": "json",
"volumes[0].name": "customMount",
"volumes[0].configMap.name": "my-configmap",
"volumeMounts[0].name": "customMount",
"volumeMounts[0].mountPath": "/my/mount/path",
"priorityClassName": "test-priority-class",
"flags.updateStrategy": "eventual",
"flags.logLevel": "info",
"flags.logFormat": "json",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -474,11 +470,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
-assert.Len(t, deployment.Spec.Template.Spec.Volumes, 2)
+assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
-assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
-assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Volumes[1].Name)
-assert.Equal(t, "my-configmap", deployment.Spec.Template.Spec.Volumes[1].ConfigMap.Name)
+assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
@@ -527,11 +521,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
-assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 2)
+assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
assert.Equal(t, "/my/mount/path", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
}
func TestTemplate_EnableLeaderElectionRole(t *testing.T) {

View File

@@ -72,10 +72,6 @@ tolerations: []
affinity: {}
-# Mount volumes in the container.
-volumes: []
-volumeMounts: []
# Leverage a PriorityClass to ensure your pods survive resource shortages
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# PriorityClass: system-cluster-critical

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.3
+version: 0.8.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.8.3"
appVersion: "0.8.1"
home: https://github.com/actions/actions-runner-controller

View File

@@ -99,7 +99,7 @@ volumeMounts:
image: docker:dind
args:
- dockerd
-- --host=unix:///run/docker/docker.sock
+- --host=unix:///var/run/docker.sock
- --group=$(DOCKER_GROUP_GID)
env:
- name: DOCKER_GROUP_GID
@@ -110,7 +110,7 @@ volumeMounts:
- name: work
mountPath: /home/runner/_work
- name: dind-sock
-mountPath: /run/docker
+mountPath: /var/run
- name: dind-externals
mountPath: /home/runner/externals
{{- end }}
@@ -223,7 +223,7 @@ env:
{{- end }}
{{- if $setDockerHost }}
- name: DOCKER_HOST
-value: unix:///run/docker/docker.sock
+value: unix:///var/run/docker.sock
{{- end }}
{{- if $setRunnerWaitDocker }}
- name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
@@ -264,7 +264,7 @@ volumeMounts:
{{- end }}
{{- if $mountDindCert }}
- name: dind-sock
-mountPath: /run/docker
+mountPath: /var/run
readOnly: true
{{- end }}
{{- if $mountGitHubServerTLS }}
@@ -526,13 +526,13 @@ volumeMounts:
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
@@ -545,11 +545,11 @@ volumeMounts:
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountNamespace "" }}
{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountNamespace }}
{{- end }}

View File

@@ -34,7 +34,7 @@ type Listener interface {
//go:generate mockery --name Worker --output ./mocks --outpkg mocks --case underscore
type Worker interface {
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
-HandleDesiredRunnerCount(ctx context.Context, count int) (int, error)
+HandleDesiredRunnerCount(ctx context.Context, desiredRunnerCount int) error
}
func New(config config.Config) (*App, error) {
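
Note for reviewers: the Worker interface change above is the axis of most of this compare. A minimal sketch of the two shapes side by side (the package name and the JobStarted stand-in are illustrative, not the repo's actual layout; the real type is actions.JobStarted):

package sketch

import "context"

// JobStarted is a stand-in for the repo's actions.JobStarted type.
type JobStarted struct{ RunnerRequestId int64 }

// Removed side of the compare: the handler reports back the applied runner
// count, which the caller can feed into metrics.
type WorkerWithCount interface {
	HandleJobStarted(ctx context.Context, jobInfo *JobStarted) error
	HandleDesiredRunnerCount(ctx context.Context, count int) (int, error)
}

// Added side of the compare: the handler only reports success or failure.
type Worker interface {
	HandleJobStarted(ctx context.Context, jobInfo *JobStarted) error
	HandleDesiredRunnerCount(ctx context.Context, desiredRunnerCount int) error
}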

View File

@@ -15,28 +15,18 @@ type Worker struct {
mock.Mock
}
-// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count
-func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
-ret := _m.Called(ctx, count)
+// HandleDesiredRunnerCount provides a mock function with given fields: ctx, desiredRunnerCount
+func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, desiredRunnerCount int) error {
+ret := _m.Called(ctx, desiredRunnerCount)
-var r0 int
-var r1 error
-if rf, ok := ret.Get(0).(func(context.Context, int) (int, error)); ok {
-return rf(ctx, count)
-}
-if rf, ok := ret.Get(0).(func(context.Context, int) int); ok {
-r0 = rf(ctx, count)
+var r0 error
+if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
+r0 = rf(ctx, desiredRunnerCount)
} else {
-r0 = ret.Get(0).(int)
+r0 = ret.Error(0)
}
-if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
-r1 = rf(ctx, count)
-} else {
-r1 = ret.Error(1)
-}
-return r0, r1
+return r0
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
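
For reference, either shape of the regenerated mock is driven the same way through testify's Called/Error plumbing. A self-contained sketch (the worker type below is a hypothetical stand-in for the generated mocks.Worker, assuming github.com/stretchr/testify):

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// worker mimics the generated mock: Called records the invocation and
// returns the values registered via On(...).Return(...).
type worker struct{ mock.Mock }

func (m *worker) HandleDesiredRunnerCount(ctx context.Context, n int) error {
	return m.Called(ctx, n).Error(0)
}

func TestHandleDesiredRunnerCountMock(t *testing.T) {
	w := &worker{}
	w.On("HandleDesiredRunnerCount", mock.Anything, 3).Return(nil).Once()
	require.NoError(t, w.HandleDesiredRunnerCount(context.Background(), 3))
	w.AssertExpectations(t)
}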

View File

@@ -35,7 +35,6 @@ type Client interface {
DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error)
-DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error
}
type Config struct {
@@ -114,7 +113,7 @@ func New(config Config) (*Listener, error) {
//go:generate mockery --name Handler --output ./mocks --outpkg mocks --case underscore
type Handler interface {
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
-HandleDesiredRunnerCount(ctx context.Context, count int) (int, error)
+HandleDesiredRunnerCount(ctx context.Context, desiredRunnerCount int) error
}
// Listen listens for incoming messages and handles them using the provided handler.
@@ -127,12 +126,6 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
return fmt.Errorf("createSession failed: %w", err)
}
-defer func() {
-if err := l.deleteMessageSession(); err != nil {
-l.logger.Error(err, "failed to delete message session")
-}
-}()
initialMessage := &actions.RunnerScaleSetMessage{
MessageId: 0,
MessageType: "RunnerScaleSetJobMessages",
@@ -140,21 +133,28 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
Body: "",
}
if l.session.Statistics == nil {
return fmt.Errorf("session statistics is nil")
}
-l.metrics.PublishStatistics(initialMessage.Statistics)
+if l.session.Statistics.TotalAvailableJobs > 0 || l.session.Statistics.TotalAssignedJobs > 0 {
+acquirableJobs, err := l.client.GetAcquirableJobs(ctx, l.scaleSetID)
+if err != nil {
+return fmt.Errorf("failed to call GetAcquirableJobs: %w", err)
+}
-desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs)
-if err != nil {
+acquirableJobsJson, err := json.Marshal(acquirableJobs)
+if err != nil {
+return fmt.Errorf("failed to marshal acquirable jobs: %w", err)
+}
+initialMessage.Body = string(acquirableJobsJson)
+}
+if err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs); err != nil {
return fmt.Errorf("handling initial message failed: %w", err)
}
-l.metrics.PublishDesiredRunners(desiredRunners)
for {
select {
case <-ctx.Done():
-return ctx.Err()
+return fmt.Errorf("context cancelled: %w", ctx.Err())
default:
}
@@ -167,52 +167,27 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
continue
}
-// New context is created to avoid cancelation during message handling.
-if err := l.handleMessage(context.Background(), handler, msg); err != nil {
-return fmt.Errorf("failed to handle message: %w", err)
-}
-}
-}
-func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
-parsedMsg, err := l.parseMessage(ctx, msg)
-if err != nil {
-return fmt.Errorf("failed to parse message: %w", err)
-}
-l.metrics.PublishStatistics(parsedMsg.statistics)
-if len(parsedMsg.jobsAvailable) > 0 {
-acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
+statistics, jobsStarted, err := l.parseMessage(ctx, msg)
if err != nil {
-return fmt.Errorf("failed to acquire jobs: %w", err)
+return fmt.Errorf("failed to parse message: %w", err)
}
-l.logger.Info("Jobs are acquired", "count", len(acquiredJobIDs), "requestIds", fmt.Sprint(acquiredJobIDs))
-}
-l.lastMessageID = msg.MessageId
-for _, jobCompleted := range parsedMsg.jobsCompleted {
-l.metrics.PublishJobCompleted(jobCompleted)
-}
+l.lastMessageID = msg.MessageId
-if err := l.deleteLastMessage(ctx); err != nil {
-return fmt.Errorf("failed to delete message: %w", err)
-}
-for _, jobStarted := range parsedMsg.jobsStarted {
-if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
-return fmt.Errorf("failed to handle job started: %w", err)
+if err := l.deleteLastMessage(ctx); err != nil {
+return fmt.Errorf("failed to delete message: %w", err)
}
-l.metrics.PublishJobStarted(jobStarted)
-}
-desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs)
-if err != nil {
-return fmt.Errorf("failed to handle desired runner count: %w", err)
+for _, jobStarted := range jobsStarted {
+if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
+return fmt.Errorf("failed to handle job started: %w", err)
+}
+}
+if err := handler.HandleDesiredRunnerCount(ctx, statistics.TotalAssignedJobs); err != nil {
+return fmt.Errorf("failed to handle desired runner count: %w", err)
+}
}
-l.metrics.PublishDesiredRunners(desiredRunners)
-return nil
}
func (l *Listener) createSession(ctx context.Context) error {
@@ -296,57 +271,48 @@ func (l *Listener) deleteLastMessage(ctx context.Context) error {
return nil
}
-type parsedMessage struct {
-statistics *actions.RunnerScaleSetStatistic
-jobsStarted []*actions.JobStarted
-jobsAvailable []*actions.JobAvailable
-jobsCompleted []*actions.JobCompleted
-}
-func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
-if msg.MessageType != "RunnerScaleSetJobMessages" {
-l.logger.Info("Skipping message", "messageType", msg.MessageType)
-return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
-}
+func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*actions.RunnerScaleSetStatistic, []*actions.JobStarted, error) {
l.logger.Info("Processing message", "messageId", msg.MessageId, "messageType", msg.MessageType)
if msg.Statistics == nil {
return nil, fmt.Errorf("invalid message: statistics is nil")
return nil, nil, fmt.Errorf("invalid message: statistics is nil")
}
l.logger.Info("New runner scale set statistics.", "statistics", msg.Statistics)
if msg.MessageType != "RunnerScaleSetJobMessages" {
l.logger.Info("Skipping message", "messageType", msg.MessageType)
return nil, nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
}
var batchedMessages []json.RawMessage
if len(msg.Body) > 0 {
if err := json.Unmarshal([]byte(msg.Body), &batchedMessages); err != nil {
return nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
return nil, nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
}
}
parsedMsg := &parsedMessage{
statistics: msg.Statistics,
}
var availableJobs []int64
var startedJobs []*actions.JobStarted
for _, msg := range batchedMessages {
var messageType actions.JobMessageType
if err := json.Unmarshal(msg, &messageType); err != nil {
return nil, fmt.Errorf("failed to decode job message type: %w", err)
return nil, nil, fmt.Errorf("failed to decode job message type: %w", err)
}
switch messageType.MessageType {
case messageTypeJobAvailable:
var jobAvailable actions.JobAvailable
if err := json.Unmarshal(msg, &jobAvailable); err != nil {
return nil, fmt.Errorf("failed to decode job available: %w", err)
return nil, nil, fmt.Errorf("failed to decode job available: %w", err)
}
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
-parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
+availableJobs = append(availableJobs, jobAvailable.RunnerRequestId)
case messageTypeJobAssigned:
var jobAssigned actions.JobAssigned
if err := json.Unmarshal(msg, &jobAssigned); err != nil {
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
return nil, nil, fmt.Errorf("failed to decode job assigned: %w", err)
}
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
@@ -354,39 +320,43 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
case messageTypeJobStarted:
var jobStarted actions.JobStarted
if err := json.Unmarshal(msg, &jobStarted); err != nil {
return nil, fmt.Errorf("could not decode job started message. %w", err)
return nil, nil, fmt.Errorf("could not decode job started message. %w", err)
}
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
-parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
+startedJobs = append(startedJobs, &jobStarted)
case messageTypeJobCompleted:
var jobCompleted actions.JobCompleted
if err := json.Unmarshal(msg, &jobCompleted); err != nil {
return nil, fmt.Errorf("failed to decode job completed: %w", err)
return nil, nil, fmt.Errorf("failed to decode job completed: %w", err)
}
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
-parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
default:
l.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
}
}
-return parsedMsg, nil
-}
+l.logger.Info("Available jobs.", "count", len(availableJobs), "requestIds", fmt.Sprint(availableJobs))
+if len(availableJobs) > 0 {
+acquired, err := l.acquireAvailableJobs(ctx, availableJobs)
+if err != nil {
+return nil, nil, err
+}
-func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
-ids := make([]int64, 0, len(jobsAvailable))
-for _, job := range jobsAvailable {
-ids = append(ids, job.RunnerRequestId)
+l.logger.Info("Jobs are acquired", "count", len(acquired), "requestIds", fmt.Sprint(acquired))
}
-l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
+return msg.Statistics, startedJobs, nil
}
-idsAcquired, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
+func (l *Listener) acquireAvailableJobs(ctx context.Context, availableJobs []int64) ([]int64, error) {
+l.logger.Info("Acquiring jobs")
+ids, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, availableJobs)
if err == nil { // if NO errors
-return idsAcquired, nil
+return ids, nil
}
expiredError := &actions.MessageQueueTokenExpiredError{}
@@ -398,12 +368,12 @@ func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*ac
return nil, err
}
-idsAcquired, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
+ids, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, availableJobs)
if err != nil {
return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
}
-return idsAcquired, nil
+return ids, nil
}
func (l *Listener) refreshSession(ctx context.Context) error {
@@ -416,16 +386,3 @@ func (l *Listener) refreshSession(ctx context.Context) error {
l.session = session
return nil
}
-func (l *Listener) deleteMessageSession() error {
-ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-defer cancel()
-l.logger.Info("Deleting message session")
-if err := l.client.DeleteMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId); err != nil {
-return fmt.Errorf("failed to delete message session: %w", err)
-}
-return nil
-}
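
Summing up the listener diff: the parsedMessage/handleMessage pipeline (metrics publishing, job acquisition in the caller, session deletion on exit) gives way to a parseMessage that returns a statistics/started-jobs tuple and acquires available jobs itself. A simplified sketch of the resulting per-message flow, with stand-in types and trimmed error handling (names here are illustrative, not the repo's exact code):

package sketch

import (
	"context"
	"fmt"
)

type Statistic struct{ TotalAssignedJobs int }
type JobStarted struct{ RunnerId int64 }
type Message struct {
	MessageId  int64
	Statistics *Statistic
}

type Handler interface {
	HandleJobStarted(ctx context.Context, job *JobStarted) error
	HandleDesiredRunnerCount(ctx context.Context, count int) error
}

type Listener struct{ lastMessageID int64 }

// parseMessage validates the message and decodes its body; in the real code
// it also acquires any available jobs before reporting started jobs.
func (l *Listener) parseMessage(_ context.Context, msg *Message) (*Statistic, []*JobStarted, error) {
	if msg.Statistics == nil {
		return nil, nil, fmt.Errorf("invalid message: statistics is nil")
	}
	return msg.Statistics, nil, nil
}

// handleOne mirrors the loop body: parse, record the message ID, delete the
// message, fan out started jobs, then apply the desired runner count.
func (l *Listener) handleOne(ctx context.Context, h Handler, msg *Message) error {
	stats, started, err := l.parseMessage(ctx, msg)
	if err != nil {
		return fmt.Errorf("failed to parse message: %w", err)
	}
	l.lastMessageID = msg.MessageId
	for _, js := range started {
		if err := h.HandleJobStarted(ctx, js); err != nil {
			return fmt.Errorf("failed to handle job started: %w", err)
		}
	}
	return h.HandleDesiredRunnerCount(ctx, stats.TotalAssignedJobs)
}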

View File

@@ -2,7 +2,6 @@ package listener
import (
"context"
"encoding/json"
"errors"
"net/http"
"testing"
@@ -10,6 +9,7 @@ import (
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
@@ -38,6 +38,22 @@ func TestNew(t *testing.T) {
assert.NotNil(t, l)
})
t.Run("SetStaticMetrics", func(t *testing.T) {
t.Parallel()
metrics := metricsmocks.NewPublisher(t)
metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
}
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
})
}
func TestListener_createSession(t *testing.T) {
@@ -419,8 +435,6 @@ func TestListener_Listen(t *testing.T) {
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
config.Client = client
l, err := New(config)
@@ -429,7 +443,7 @@ func TestListener_Listen(t *testing.T) {
var called bool
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
-Return(0, nil).
+Return(nil).
Run(
func(mock.Arguments) {
called = true
@@ -442,64 +456,6 @@ func TestListener_Listen(t *testing.T) {
assert.True(t, errors.Is(err, context.Canceled))
assert.True(t, called)
})
t.Run("CancelContextAfterGetMessage", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{},
}
client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).
Return(msg, nil).
Run(
func(mock.Arguments) {
cancel()
},
).
Once()
// Ensure delete message is called with background context
client.On("DeleteMessage", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
config.Client = client
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
Return(0, nil).
Once()
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
Return(0, nil).
Once()
l, err := New(config)
require.Nil(t, err)
err = l.Listen(ctx, handler)
assert.ErrorIs(t, context.Canceled, err)
})
}
func TestListener_acquireAvailableJobs(t *testing.T) {
@@ -533,24 +489,7 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
Statistics: &actions.RunnerScaleSetStatistic{},
}
-availableJobs := []*actions.JobAvailable{
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 1,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 2,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 3,
-},
-},
-}
-_, err = l.acquireAvailableJobs(ctx, availableJobs)
+_, err = l.acquireAvailableJobs(ctx, []int64{1, 2, 3})
assert.Error(t, err)
})
@@ -584,26 +523,9 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
Statistics: &actions.RunnerScaleSetStatistic{},
}
-availableJobs := []*actions.JobAvailable{
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 1,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 2,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 3,
-},
-},
-}
-acquiredJobIDs, err := l.acquireAvailableJobs(ctx, availableJobs)
+acquiredJobIDs, err := l.acquireAvailableJobs(ctx, []int64{1, 2, 3})
assert.NoError(t, err)
-assert.Equal(t, []int64{1, 2, 3}, acquiredJobIDs)
+assert.Equal(t, jobIDs, acquiredJobIDs)
})
t.Run("RefreshAndSucceeds", func(t *testing.T) {
@@ -628,43 +550,12 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
+// First call to AcquireJobs will fail with a token expired error
+client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
+// Second call to AcquireJobs will succeed
want := []int64{1, 2, 3}
-availableJobs := []*actions.JobAvailable{
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 1,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 2,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 3,
-},
-},
-}
-// First call to AcquireJobs will fail with a token expired error
-client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
-Run(func(args mock.Arguments) {
-ids := args.Get(3).([]int64)
-assert.Equal(t, want, ids)
-}).
-Return(nil, &actions.MessageQueueTokenExpiredError{}).
-Once()
-// First call to AcquireJobs will fail with a token expired error
-client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
-Run(func(args mock.Arguments) {
-ids := args.Get(3).([]int64)
-assert.Equal(t, want, ids)
-}).
-Return(want, nil).
-Once()
+client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once()
config.Client = client
@@ -676,7 +567,7 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
RunnerScaleSet: &actions.RunnerScaleSet{},
}
-got, err := l.acquireAvailableJobs(ctx, availableJobs)
+got, err := l.acquireAvailableJobs(ctx, want)
assert.Nil(t, err)
assert.Equal(t, want, got)
})
@@ -715,165 +606,8 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
RunnerScaleSet: &actions.RunnerScaleSet{},
}
-availableJobs := []*actions.JobAvailable{
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 1,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 2,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-RunnerRequestId: 3,
-},
-},
-}
-got, err := l.acquireAvailableJobs(ctx, availableJobs)
+got, err := l.acquireAvailableJobs(ctx, []int64{1, 2, 3})
assert.NotNil(t, err)
assert.Nil(t, got)
})
}
-func TestListener_parseMessage(t *testing.T) {
-t.Run("FailOnEmptyStatistics", func(t *testing.T) {
-msg := &actions.RunnerScaleSetMessage{
-MessageId: 1,
-MessageType: "RunnerScaleSetJobMessages",
-Statistics: nil,
-}
-l := &Listener{}
-parsedMsg, err := l.parseMessage(context.Background(), msg)
-assert.Error(t, err)
-assert.Nil(t, parsedMsg)
-})
-t.Run("FailOnIncorrectMessageType", func(t *testing.T) {
-msg := &actions.RunnerScaleSetMessage{
-MessageId: 1,
-MessageType: "RunnerMessages", // arbitrary message type
-Statistics: &actions.RunnerScaleSetStatistic{},
-}
-l := &Listener{}
-parsedMsg, err := l.parseMessage(context.Background(), msg)
-assert.Error(t, err)
-assert.Nil(t, parsedMsg)
-})
-t.Run("ParseAll", func(t *testing.T) {
-msg := &actions.RunnerScaleSetMessage{
-MessageId: 1,
-MessageType: "RunnerScaleSetJobMessages",
-Body: "",
-Statistics: &actions.RunnerScaleSetStatistic{
-TotalAvailableJobs: 1,
-TotalAcquiredJobs: 2,
-TotalAssignedJobs: 3,
-TotalRunningJobs: 4,
-TotalRegisteredRunners: 5,
-TotalBusyRunners: 6,
-TotalIdleRunners: 7,
-},
-}
-var batchedMessages []any
-jobsAvailable := []*actions.JobAvailable{
-{
-AcquireJobUrl: "https://github.com/example",
-JobMessageBase: actions.JobMessageBase{
-JobMessageType: actions.JobMessageType{
-MessageType: messageTypeJobAvailable,
-},
-RunnerRequestId: 1,
-},
-},
-{
-AcquireJobUrl: "https://github.com/example",
-JobMessageBase: actions.JobMessageBase{
-JobMessageType: actions.JobMessageType{
-MessageType: messageTypeJobAvailable,
-},
-RunnerRequestId: 2,
-},
-},
-}
-for _, msg := range jobsAvailable {
-batchedMessages = append(batchedMessages, msg)
-}
-jobsAssigned := []*actions.JobAssigned{
-{
-JobMessageBase: actions.JobMessageBase{
-JobMessageType: actions.JobMessageType{
-MessageType: messageTypeJobAssigned,
-},
-RunnerRequestId: 3,
-},
-},
-{
-JobMessageBase: actions.JobMessageBase{
-JobMessageType: actions.JobMessageType{
-MessageType: messageTypeJobAssigned,
-},
-RunnerRequestId: 4,
-},
-},
-}
-for _, msg := range jobsAssigned {
-batchedMessages = append(batchedMessages, msg)
-}
-jobsStarted := []*actions.JobStarted{
-{
-JobMessageBase: actions.JobMessageBase{
-JobMessageType: actions.JobMessageType{
-MessageType: messageTypeJobStarted,
-},
-RunnerRequestId: 5,
-},
-RunnerId: 2,
-RunnerName: "runner2",
-},
-}
-for _, msg := range jobsStarted {
-batchedMessages = append(batchedMessages, msg)
-}
-jobsCompleted := []*actions.JobCompleted{
-{
-JobMessageBase: actions.JobMessageBase{
-JobMessageType: actions.JobMessageType{
-MessageType: messageTypeJobCompleted,
-},
-RunnerRequestId: 6,
-},
-Result: "success",
-RunnerId: 1,
-RunnerName: "runner1",
-},
-}
-for _, msg := range jobsCompleted {
-batchedMessages = append(batchedMessages, msg)
-}
-b, err := json.Marshal(batchedMessages)
-require.NoError(t, err)
-msg.Body = string(b)
-l := &Listener{}
-parsedMsg, err := l.parseMessage(context.Background(), msg)
-require.NoError(t, err)
-assert.Equal(t, msg.Statistics, parsedMsg.statistics)
-assert.Equal(t, jobsAvailable, parsedMsg.jobsAvailable)
-assert.Equal(t, jobsStarted, parsedMsg.jobsStarted)
-assert.Equal(t, jobsCompleted, parsedMsg.jobsCompleted)
-})
-}

View File

@@ -1,205 +0,0 @@
package listener
import (
"context"
"encoding/json"
"testing"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestInitialMetrics(t *testing.T) {
t.Parallel()
t.Run("SetStaticMetrics", func(t *testing.T) {
t.Parallel()
metrics := metricsmocks.NewPublisher(t)
minRunners := 5
maxRunners := 10
metrics.On("PublishStatic", minRunners, maxRunners).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
MinRunners: minRunners,
MaxRunners: maxRunners,
}
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
})
t.Run("InitialMessageStatistics", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
sessionStatistics := &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
}
uuid := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &uuid,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: sessionStatistics,
}
metrics := metricsmocks.NewPublisher(t)
metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
metrics.On("PublishStatistics", sessionStatistics).Once()
metrics.On("PublishDesiredRunners", sessionStatistics.TotalAssignedJobs).
Run(
func(mock.Arguments) {
cancel()
},
).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
}
client := listenermocks.NewClient(t)
client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
config.Client = client
handler := listenermocks.NewHandler(t)
handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs).
Return(sessionStatistics.TotalAssignedJobs, nil).
Once()
l, err := New(config)
assert.Nil(t, err)
assert.NotNil(t, l)
assert.ErrorIs(t, context.Canceled, l.Listen(ctx, handler))
})
}
func TestHandleMessageMetrics(t *testing.T) {
t.Parallel()
msg := &actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Body: "",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAcquiredJobs: 2,
TotalAssignedJobs: 3,
TotalRunningJobs: 4,
TotalRegisteredRunners: 5,
TotalBusyRunners: 6,
TotalIdleRunners: 7,
},
}
var batchedMessages []any
jobsStarted := []*actions.JobStarted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted,
},
RunnerRequestId: 8,
},
RunnerId: 3,
RunnerName: "runner3",
},
}
for _, msg := range jobsStarted {
batchedMessages = append(batchedMessages, msg)
}
jobsCompleted := []*actions.JobCompleted{
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 6,
},
Result: "success",
RunnerId: 1,
RunnerName: "runner1",
},
{
JobMessageBase: actions.JobMessageBase{
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 7,
},
Result: "success",
RunnerId: 2,
RunnerName: "runner2",
},
}
for _, msg := range jobsCompleted {
batchedMessages = append(batchedMessages, msg)
}
b, err := json.Marshal(batchedMessages)
require.NoError(t, err)
msg.Body = string(b)
desiredResult := 4
metrics := metricsmocks.NewPublisher(t)
metrics.On("PublishStatic", 0, 0).Once()
metrics.On("PublishStatistics", msg.Statistics).Once()
metrics.On("PublishJobCompleted", jobsCompleted[0]).Once()
metrics.On("PublishJobCompleted", jobsCompleted[1]).Once()
metrics.On("PublishJobStarted", jobsStarted[0]).Once()
metrics.On("PublishDesiredRunners", desiredResult).Once()
handler := listenermocks.NewHandler(t)
handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).Return(desiredResult, nil).Once()
client := listenermocks.NewClient(t)
client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
config := Config{
Client: listenermocks.NewClient(t),
ScaleSetID: 1,
Metrics: metrics,
}
l, err := New(config)
require.NoError(t, err)
l.client = client
l.session = &actions.RunnerScaleSetSession{
OwnerName: "",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "",
MessageQueueAccessToken: "",
Statistics: &actions.RunnerScaleSetStatistic{},
}
err = l.handleMessage(context.Background(), handler, msg)
require.NoError(t, err)
}

View File

@@ -83,20 +83,6 @@ func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, mes
return r0
}
-// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
-func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
-ret := _m.Called(ctx, runnerScaleSetId, sessionId)
-var r0 error
-if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
-r0 = rf(ctx, runnerScaleSetId, sessionId)
-} else {
-r0 = ret.Error(0)
-}
-return r0
-}
// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
ret := _m.Called(ctx, runnerScaleSetId)

View File

@@ -15,28 +15,18 @@ type Handler struct {
mock.Mock
}
-// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count
-func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
-ret := _m.Called(ctx, count)
+// HandleDesiredRunnerCount provides a mock function with given fields: ctx, desiredRunnerCount
+func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, desiredRunnerCount int) error {
+ret := _m.Called(ctx, desiredRunnerCount)
-var r0 int
-var r1 error
-if rf, ok := ret.Get(0).(func(context.Context, int) (int, error)); ok {
-return rf(ctx, count)
-}
-if rf, ok := ret.Get(0).(func(context.Context, int) int); ok {
-r0 = rf(ctx, count)
+var r0 error
+if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
+r0 = rf(ctx, desiredRunnerCount)
} else {
-r0 = ret.Get(0).(int)
+r0 = ret.Error(0)
}
-if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
-r1 = rf(ctx, count)
-} else {
-r1 = ret.Error(1)
-}
-return r0, r1
+return r0
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo

View File

@@ -11,7 +11,6 @@ import (
"github.com/actions/actions-runner-controller/logging"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -142,10 +141,6 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
Do(ctx).
Into(patchedStatus)
if err != nil {
-if kerrors.IsNotFound(err) {
-w.logger.Info("Ephemeral runner not found, skipping patching of ephemeral runner status", "runnerName", jobInfo.RunnerName)
-return nil
-}
return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err)
}
@@ -161,7 +156,7 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
// The function then scales the ephemeral runner set by applying the merge patch.
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
-func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
+func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) error {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
@@ -176,7 +171,7 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int,
if targetRunnerCount == w.lastPatch {
w.logger.Info("Skipping patching of EphemeralRunnerSet as the desired count has not changed", logValues...)
-return targetRunnerCount, nil
+return nil
}
original, err := json.Marshal(
@@ -187,7 +182,7 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int,
},
)
if err != nil {
return 0, fmt.Errorf("failed to marshal empty ephemeral runner set: %w", err)
return fmt.Errorf("failed to marshal empty ephemeral runner set: %w", err)
}
patch, err := json.Marshal(
@@ -199,12 +194,12 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int,
)
if err != nil {
w.logger.Error(err, "could not marshal patch ephemeral runner set")
-return 0, err
+return err
}
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
if err != nil {
return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
return fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
}
w.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))
@@ -222,7 +217,7 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int,
Do(ctx).
Into(patchedEphemeralRunnerSet)
if err != nil {
return 0, fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err)
return fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err)
}
w.logger.Info("Ephemeral runner set scaled.",
@@ -230,5 +225,5 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int,
"name", w.config.EphemeralRunnerSetName,
"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
)
-return targetRunnerCount, nil
+return nil
}
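
The scaling arithmetic itself is untouched by this diff; only the return value changes. The clamp, as a hypothetical standalone helper (min is the Go 1.21 builtin; the real code inlines this inside HandleDesiredRunnerCount):

package sketch

// desiredReplicas offsets the assigned-job count by the configured floor and
// caps it at the configured ceiling. Per the comment in the diff, the
// resource builder defaults MaxRunners to the maximum int32 when unset.
func desiredReplicas(count, minRunners, maxRunners int) int {
	return min(minRunners+count, maxRunners)
}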

View File

@@ -226,7 +226,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
ports = append(ports, port)
}
-terminationGracePeriodSeconds := int64(60)
podSpec := corev1.PodSpec{
ServiceAccountName: serviceAccount.Name,
Containers: []corev1.Container{
@@ -257,9 +256,8 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
},
},
},
-ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets,
-RestartPolicy: corev1.RestartPolicyNever,
-TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
+ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets,
+RestartPolicy: corev1.RestartPolicyNever,
}
labels := make(map[string]string, len(autoscalingListener.Labels))

View File

@@ -43,16 +43,6 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
## Changelog
-### v0.8.3
-1. Expose volumeMounts and volumes in gha-runner-scale-set-controller [#3260](https://github.com/actions/actions-runner-controller/pull/3260)
-1. Refer to the correct variable in discovery error message [#3296](https://github.com/actions/actions-runner-controller/pull/3296)
-1. Fix acquire jobs after session refresh ghalistener [#3307](https://github.com/actions/actions-runner-controller/pull/3307)
-### v0.8.2
-1. Add listener graceful termination period and background context after the message is received [#3187](https://github.com/actions/actions-runner-controller/pull/3187)
-1. Publish metrics in the new ghalistener [#3193](https://github.com/actions/actions-runner-controller/pull/3193)
-1. Delete message session when listener.Listen returns [#3240](https://github.com/actions/actions-runner-controller/pull/3240)
### v0.8.1
1. Fix proxy issue in new listener client [#3181](https://github.com/actions/actions-runner-controller/pull/3181)

View File

@@ -12,4 +12,4 @@ We do not intend to provide a supported ARC dashboard. This is simply a referenc
1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners.
-3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json) into Grafana.
+3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json.json) into Grafana.

View File

@@ -640,11 +640,8 @@ func (c *Client) doSessionRequest(ctx context.Context, method, path string, requ
return err
}
-if resp.StatusCode == expectedResponseStatusCode {
-if responseUnmarshalTarget != nil {
-return json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget)
-}
-return nil
+if resp.StatusCode == expectedResponseStatusCode && responseUnmarshalTarget != nil {
+return json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget)
}
if resp.StatusCode >= 400 && resp.StatusCode < 500 {

View File

@@ -1,20 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIDVTCCAj2gAwIBAgIUOo9VGKll71GYjunZhdMQhS5rP+gwDQYJKoZIhvcNAQEL
BQAwOTESMBAGA1UEAwwJbG9jYWxob3N0MQswCQYDVQQGEwJVUzEWMBQGA1UEBwwN
U2FuIEZyYW5zaXNjbzAgFw0yNDAxMjIxMjUyNTdaGA8yMDUxMDYwODEyNTI1N1ow
OTESMBAGA1UEAwwJbG9jYWxob3N0MQswCQYDVQQGEwJVUzEWMBQGA1UEBwwNU2Fu
IEZyYW5zaXNjbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALmyQRuC
S13Iat5jMun5zg8tn4E3RZ4x5KWPvRiR9RRX4zo5f/ytmnFVGkSnDhXJkuHRzwWl
KjtdW23uUaBfNbJR55O0qUnZWAMNKO1Afm68Tfg+91a5X+KpwGiHfIGZs7UCERYg
6O2iqHQMLCOL/Ytpd6NBF+QFK9klRbfncBJmCR6FEpw1/bGr7HwlldfkPkpHNWUG
cIqytYBvzo2T2cUyrTysKtATcRg/4Fp0DAZocYfzT6/gL2yWhLwnmxqU7Gbxvrd2
6ejFitgxwoM/3rKWuXds7tFMeiKUu2RovGkvDkMEieJWwTufPBJjkIklW5S4iMMi
hJnDIn+Ag1nbVHcCAwEAAaNTMFEwHQYDVR0OBBYEFK33e+IWho6FKn4GaxRb2cmv
mmxjMB8GA1UdIwQYMBaAFK33e+IWho6FKn4GaxRb2cmvmmxjMA8GA1UdEwEB/wQF
MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHZ/Z3CSrPoWb02+iu1cUN8nlQBtAsxI
oR3nqhUSEA/9oyyXJt8NIIXauACyYzmNXG87aKQZvVzUEQM0aK4MBq+Pg0Zdnvns
8QtBvdro7jInHhfn4uS8X21Fa1gYZ0d0C6UHIXUeD9KSEOAX1JT+3VP/7FNIDzns
2ddSxzcji3eVFkDR4/1vRMTng/kiP5vFz1St1op2EYDT+v6PVr9ew3NWUf/w7fgP
sRRyx3qi7m8SRHc7FwDLk+6/zc1/14YIiX9PrvVmnJj0yULSHiBu4cQccKE2ibos
ZeUPfZL8Kl+hs/MtXG/XlYBbApm69eo7EEGHAS/2DIq2yPgsQrGMYkA=
MIIC6jCCAdICCQCoZFduxPa/eDANBgkqhkiG9w0BAQsFADA2MQswCQYDVQQGEwJV
UzEnMCUGA1UEAwweYWN0aW9ucy1ydW5uZXItY29udHJvbGxlci10ZXN0MCAXDTIz
MDExOTE2NTAwMVoYDzIwNTAwNjA1MTY1MDAxWjA2MQswCQYDVQQGEwJVUzEnMCUG
A1UEAwweYWN0aW9ucy1ydW5uZXItY29udHJvbGxlci10ZXN0MIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAykHCU0I/pdzhnQBwr2N+7so66LPq0cxc8JJL
S2mmk7gg+NWhTZzoci6aYXNRKCyH6B2Wmy7Qveku2wqT2+/4JBMYgTWH5bF7yt76
LB+x9YruSgH/pBN2WI4vRU87NOAU8F0o0U/Lp5vAJoRo+ePPvcHu0OY1WF+QnEX+
xtp6gJFGf5DT4U9upwEgQjKgvKFEoB5KNeH1qr2fS2yA2vhm6Uhm+1i/KUQUZ49K
GvFK8TQQT4HXft8rPLP5M9OitdqVU8SX0dQoXZ4M41/qydycHOvApj0LlH/XsicZ
x0mkF90hD+9VRqeYFe562NI4NHR7FGP7HKPWibNjXKC2w+z+aQIDAQABMA0GCSqG
SIb3DQEBCwUAA4IBAQBxaOCnmakd1PPp+pH40OjUktKG1nqM2tGqP0o3Bk7huB2y
jXIDi9ETuTeqqHONwwgsKOVY3J+Zt5R+teBSC0qUnypODzu+9v8Xa4Is9G9GyT5S
erjpPcJjQnvZyMHLH9DGGWE9UCyqKIqmaEc9bwr2oz1+a0rsaS3ZdIFlQibBHij5
tdJcnzXfN4T4GIbYXKMCOYDy/5CiNJ26l/pQNpO9JCzsEmngw0ooS0Bi8EcTCgB6
dsHl0w8va3l1kvxWWIlNTGwrAEpRbXmL01hAqx2yCiaFPVZ/eRNWmBWO4LpW4ouK
YOaA+X7geM6XVFlZE3cP58AxYKWHGAThxkZbD5cu
-----END CERTIFICATE-----

View File

@@ -1,23 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDyDCCArCgAwIBAgIUKCU/uCdz/9EcfzL6wd7ubSPrsxIwDQYJKoZIhvcNAQEL
BQAwOTESMBAGA1UEAwwJbG9jYWxob3N0MQswCQYDVQQGEwJVUzEWMBQGA1UEBwwN
U2FuIEZyYW5zaXNjbzAgFw0yNDAxMjIxMjU0MTRaGA8yMDUxMDYwODEyNTQxNFow
gYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1T
YW4gRnJhbnNpc2NvMRMwEQYDVQQKDApHaXRIdWJUZXN0MSMwIQYDVQQLDBpHaXRI
dWJUZXN0IEFjdGlvbnMgUnVudGltZTESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArVQ7yHHAxehcsOW8NNEplrEF/48n
9+XCc4ZWu0LdPdKAjcwMSAddHvLZVp5OUNRTUKgwWfL5DyGFnAhSZ31Ag3FHyoOB
C5BQSBEd+xsO1Gflt8Pm0A7TN2jzlVx7rq1j7kZ25AZY9oJ6ipK4Hf4mYbfSR5cl
M2WKBPGk9JbYmI7l0t3IYLm954xxfNtPxr1tEAwk75UAKNWXBwqkR31+madOaFsU
9LJT4aeFJoFs+95tQzvAymGwlE+w6aWiz0WecLSzf8ZgXcRqmQkh1EcP6/2cu5MA
CMRJcNly421DYUEbofgoZ8OetkqtFcYk+RyjUBhkQWi8AAQLKJ4q7VZKqwIDAQAB
o3YwdDAfBgNVHSMEGDAWgBSt93viFoaOhSp+BmsUW9nJr5psYzAJBgNVHRMEAjAA
MAsGA1UdDwQEAwIE8DAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwHQYDVR0O
BBYEFM4ELRkBcflqUtQ/GQK86CjBqjTUMA0GCSqGSIb3DQEBCwUAA4IBAQCMkiid
7v2jsSWc8nGOM4Z6vEJ912mKpyyfpWSpM8SxCCxzUrbMrpFx8LB4rmeziy6hNEA0
yv+h9qiu9l/vVzVc3Q9HA3linEPXqnlUEXd7PV/G/IFoYKFrXi/H+zda9G0Nqt1A
oOKM3t9fsff8KDaRQ2sdSUEjqtAlfg6bbBwO66CICXLU+VUH7hOVghT23UJVvwNY
Dvkha9TYR+aawRypLoTfT5ZtLp/0A9P+liqo6F5Xm0M89bYLXNPl1fPzY3Ihi5Jd
b6/mttpY9gxTfbw67m2Epfmt1NdOHkY7ac/Hr6pt/YyMBrPz9Z3eZxIXUIVDo/Nh
4O2g9RoFFN4m3A+d
MIIDnTCCAoWgAwIBAgIJAJskDVhiEY6fMA0GCSqGSIb3DQEBCwUAMDYxCzAJBgNV
BAYTAlVTMScwJQYDVQQDDB5hY3Rpb25zLXJ1bm5lci1jb250cm9sbGVyLXRlc3Qw
HhcNMjMwMTE5MTY1MTE0WhcNMjQwMTE5MTY1MTE0WjBaMQswCQYDVQQGEwJVUzEi
MCAGA1UECgwZYWN0aW9ucy1ydW5uZXItY29udHJvbGxlcjEnMCUGA1UECwweYWN0
aW9ucy1ydW5uZXItY29udHJvbGxlciB0ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOC
AQ8AMIIBCgKCAQEAzOTt1/VjuaHzn+b7jLeufW3rxLHFKQV+LiUiT389rbFGY+DN
CC+Nzx+DbFBpKcX/scseVhFzlXlrESWWZ4h7LGMXRsTDKs91F1RMuFCd8eIEwbuV
civR44IqT5r/0hlMOWemd3Fh/c8KF+9dWQ0q0T3tvlVzEbWNRTVAXTT4JzizqNd1
1hhnuV/KjhiptPC/8jQ4D9ocZKM8a1pM9O2z3bnmH7VTQJkhjxE7gefQTPQRmvKk
C7uqvfk2NHTTnKiLfkE10JhLTa0VND2aofNWCybGTyHNNCNlepakoP3KyFC2LjPR
oR5iwSnCRDu1z8tDWW+rIa3pfxdQ8LnH4J4CDwIDAQABo4GJMIGGMFAGA1UdIwRJ
MEehOqQ4MDYxCzAJBgNVBAYTAlVTMScwJQYDVQQDDB5hY3Rpb25zLXJ1bm5lci1j
b250cm9sbGVyLXRlc3SCCQCoZFduxPa/eDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE
8DAaBgNVHREEEzARhwR/AAABgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEB
ALdl0ytjellmhtjbXkUZKAl/R2ZXMAVxIOtb4qiN6OOwOMK4p2Wt26p34bQa2JD0
t0qvesI7spQzQObNMdT6NZJl8Ul0ABuzti/Esvmby+VfsFPasCQVXx+jqGhERqXc
SeZFIVWVACyfAc1dkqfGwehSrY62eBlY2PJ1JezagW6aLAnV6Si+96++mkALJDdX
MZhhSqjxM+Nnmhpy4My6oHVrdYWHcuVhzlEmNaMtmJCYuihIyD2Usn32xJK1k89d
WgEOPCk+ZDAligPlGZS201fsznJk5uIjmxPjjFlJLXotBs8H7j0cQ2JkV5YHsHCk
EYf5EJ0ZKtZbwRFeRC1Ajxg=
-----END CERTIFICATE-----

View File

@@ -1,28 +1,27 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCtVDvIccDF6Fyw
5bw00SmWsQX/jyf35cJzhla7Qt090oCNzAxIB10e8tlWnk5Q1FNQqDBZ8vkPIYWc
CFJnfUCDcUfKg4ELkFBIER37Gw7UZ+W3w+bQDtM3aPOVXHuurWPuRnbkBlj2gnqK
krgd/iZht9JHlyUzZYoE8aT0ltiYjuXS3chgub3njHF820/GvW0QDCTvlQAo1ZcH
CqRHfX6Zp05oWxT0slPhp4UmgWz73m1DO8DKYbCUT7DppaLPRZ5wtLN/xmBdxGqZ
CSHURw/r/Zy7kwAIxElw2XLjbUNhQRuh+Chnw562Sq0VxiT5HKNQGGRBaLwABAso
nirtVkqrAgMBAAECggEAR+/t4ANWPs1xqvmuYz1sRV6zXp3LuNdjHQ9kb9QQftgf
ArrtXfewbmfcTFbnqiR1b8ReTPbK57zB90B88vbJD8S0RxjNNj9vEnoIN2/Dd+Sn
Mt3brf55K0Yj0pnPu2+7Sel07q6zvZvpwBmk0M3qoCPq4kuY5Pv/jI2+KMVyn94A
Dc3J6xdKqLNsw7nhUDELHn8DrKQgqucTzi4goJo8Lwc9I8lanTfmbiXj1wYo3nhr
5DgVcPUceZnsrDNnfkwOaaXKAGUCTi3PWieKq6Cm22oh53s1WS5NJDuk/1NvvfV+
+6dyhfmW/jkHHMelox91n1qmLMYnq+GhoK6szapqAQKBgQDLRWZH17zdTNALQzks
RbZU9abe+UQV1O5ywdL+4F444IPY2f3gxhEWyL+xAF66ZG0+NA/EO9n7FPqAbgyA
Atz0LT7W6o9/AveqBSNs73zxGo7OYlBDq81nCgMzU11nvfTmydJhaMC+6Zyh0Bbc
vzIbygpDOL7tg4AyyEcLUNA7BwKBgQDaSnmwMCEdcTENwzVd1mOZdnXRTBPz0u0t
aCK5voL99L0+8HyKjtUBtWbBgUxCz7/+mfoNCU+QUHCJksm9vN1m5Zq4r0aEHE36
7lYAAeWnltg+OHWqGcSHRZ/zHHs8c/azemvRaTZnZ++meVkfd07jsd+yIYt/G3La
KV9t86V2PQKBgEfNdfm+vVo2ve6cil+XKHcOZymwR1qm4qvqx4t82guhUzGQn1t8
26B+vSfbB5szylsErOUWd0N3/5zKQuQdHsuqB96G8LVe6PlH42GhnzLTvMoudEfT
MjVJliPVONNiiFXVyNjb1eoaP1fxV4IWj669Sa7BJsBjiS9nC6F1pHiVAoGBALBT
fFxPZFBuAFvHlTIJXUa3I5A+zdckSCVnerVjKFiO+tb+VvttSK4qo6gnEzzcp4+3
PP6OyNAfyee2xHMZPhZB3WrVWjaYznylTJ6Q6bsn4+DOpm0Sh2dlXEB6fylj2qE7
gCAVxrZchH6Kgu0h6H2QTsuKwS2ZNHr49HbSWpNZAoGBAMrEMiyKYWKgiejs69pj
idKifoCDI+Hu1WD/eViUm2OuOfdW9fIBHoeuKmOBKGYIqx5yEbFhXoJmTtJ1aSa1
+N+0NBzv9+1W5EII0voELevxLvjeaejcUgLNabGIj1xIcPzaEKTS+Vv2Hn6nffWR
yKlIixoSTJ+oJShyT9DZyZAd
-----END PRIVATE KEY-----
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAzOTt1/VjuaHzn+b7jLeufW3rxLHFKQV+LiUiT389rbFGY+DN
CC+Nzx+DbFBpKcX/scseVhFzlXlrESWWZ4h7LGMXRsTDKs91F1RMuFCd8eIEwbuV
civR44IqT5r/0hlMOWemd3Fh/c8KF+9dWQ0q0T3tvlVzEbWNRTVAXTT4JzizqNd1
1hhnuV/KjhiptPC/8jQ4D9ocZKM8a1pM9O2z3bnmH7VTQJkhjxE7gefQTPQRmvKk
C7uqvfk2NHTTnKiLfkE10JhLTa0VND2aofNWCybGTyHNNCNlepakoP3KyFC2LjPR
oR5iwSnCRDu1z8tDWW+rIa3pfxdQ8LnH4J4CDwIDAQABAoIBAC5rr3c+IVntV0Tj
EBrRgrboMIJfxEuG8w+BWkSoj1DK2SfHxqwUGgzTFvNzRGAye7vMSRM24Pj8iUVZ
Pro2MbHcwWlHKvCID/85GiioGyCyFGHQHgu/4c2pr+xZMZxoHtzintRw28KlJaRG
lt+WHB1L6pE0yt04RMlpRyvW1GIODtEh1wya61Aa2xZMJxgbNWv89znLI2f3ForY
QR/he8hQtfJQeH+mv2SvJ1bopkJ58ZObKapuJAWCSxzVRj/yol1MqfUDBy4NrJfY
F5UP0BSmnED1EdIXeC0duo5RyiSfHqqJlcKR+zlepOb4pr4I1H8P6AIJ9iiunxUJ
h9i+YAECgYEA7JgrH5sjStcHdwUgEPN4E1MI3WRwDnnuVpzeSUH7tRqK4lsdIKmF
u/ss3TMwTgC8XR4JJbVp+6ea54zpVjtBGwShMSvn2+e7OHHg1YoVqBIgRpL+/C4m
wfon2EglQ0IjscUtKuAR/UyhU6vZtkYRUKeXRKisW4yoobdob0Y4lakCgYEA3bMl
BfszC5c0RXI5vsBUBvr9gXMY/8tacM7H8i3vT7JqoYJG6GGST0/HeK+La3w2zryx
Q8IL6uLy/HZvTHZ+BSp4KzwwgDUIk0jm/JcvzD2ZhJHoAo4aQTc6QI2ZNgjGVwCb
nJ0Niaxc4CdSUEAUHH1bCXk/e2stcnieFuiiPPcCgYAIxrA60OdjPEyzloYU+uMG
XHskszgQ4Wb84X7BWug6VIy4Tsbq0j76tRt57Q8qpY5XKekO9AbFZfcyBaEWKMaG
eQp9p3JHTvY75sV/Rkr9XAbEd2lr805OvbfCpxJyxz5JttWxFHS2X6RQVTyTLVAx
HLZYvqT+FF6g+QuvrPwmWQKBgAQspVvReQqU1EUie3feAzcGbtOLKUNXvuI04orq
1oC3qU5VN6SUgb7Aj87z7zoc4qNN5kCSXMsVbuHWEQ5thL3wKMcXoQoo9Xpgewjy
h9Herw9R9/5kUpY7xfsFL4dW7vUga82tH14iQrVtyBz+t+I5cgdhoxJd2EM5hjCE
PNnNAoGBALPjmvEZ1HJdCOxY/AisziVtOFc6Glk/KhpSIT7WE1me4qLQFmrsHIDQ
kZ8Sb1f3PQ4T4vHGrtl8qh144MJPI1Nb8klzdlD1xeypGpgXoQb5fsC17g1fgczp
TGzq3pvnlGnrgVmnfrWQCHXDLzXtLqM/Pu84guPFftJQ+++yy0np
-----END RSA PRIVATE KEY-----

go.mod (2 changed lines)
View File

@@ -10,7 +10,7 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/go-cmp v0.6.0
github.com/google/go-github/v52 v52.0.0
-github.com/google/uuid v1.6.0
+github.com/google/uuid v1.4.0
github.com/gorilla/mux v1.8.1
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
github.com/gruntwork-io/terratest v0.46.7

go.sum (4 changed lines)
View File

@@ -99,8 +99,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a h1:fEBsGL/sjAuJrgah5XqmmYsTLzJp/TO9Lhy39gkverk=
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
-github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
-github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=

View File

@@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
-RUNNER_VERSION ?= 2.313.0
-RUNNER_CONTAINER_HOOKS_VERSION ?= 0.5.1
+RUNNER_VERSION ?= 2.311.0
+RUNNER_CONTAINER_HOOKS_VERSION ?= 0.5.0
DOCKER_VERSION ?= 24.0.7
# default list of platforms for which multiarch image is built

View File

@@ -1,2 +1,2 @@
-RUNNER_VERSION=2.313.0
-RUNNER_CONTAINER_HOOKS_VERSION=0.5.1
+RUNNER_VERSION=2.311.0
+RUNNER_CONTAINER_HOOKS_VERSION=0.5.0

View File

@@ -36,8 +36,8 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.313.0"
RunnerContainerHooksVersion = "0.5.1"
RunnerVersion = "2.311.0"
RunnerContainerHooksVersion = "0.5.0"
)
// If you're willing to run this test via VS Code "run test" or "debug test",