Compare commits

...

3 Commits

Author SHA1 Message Date
gateixeira
1f615c1a33 feat: add default linux nodeSelector to listener pod (#4377)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2026-02-24 17:56:39 +01:00
Nikola Jokic
8b7fd9ffef Switch client to scaleset library for the listener and update mocks (#4383) 2026-02-24 14:17:31 +01:00
Nikola Jokic
c6e4c94a6a Fix tests and generate mocks (#4384) 2026-02-24 13:36:01 +01:00
39 changed files with 2841 additions and 3353 deletions

View File

@@ -47,9 +47,7 @@ jobs:
- name: Install tools
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.2.0/kubebuilder_2.2.0_linux_amd64.tar.gz
tar zxvf kubebuilder_2.2.0_linux_amd64.tar.gz
sudo mv kubebuilder_2.2.0_linux_amd64 /usr/local/kubebuilder
curl -s https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh | bash
sudo mv kustomize /usr/local/bin
curl -L -O https://github.com/tcnksm/ghr/releases/download/v0.13.0/ghr_v0.13.0_linux_amd64.tar.gz

View File

@@ -66,6 +66,19 @@ jobs:
- name: Check diff
run: git diff --exit-code
mocks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
cache: false
- name: "Run mockery"
run: go tool github.com/vektra/mockery/v3
- name: Check diff
run: git diff --exit-code
test:
runs-on: ubuntu-latest
steps:
@@ -76,13 +89,11 @@ jobs:
- run: make manifests
- name: Check diff
run: git diff --exit-code
- name: Install kubebuilder
- name: Setup envtest
run: |
curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.30.0-linux-amd64.tar.gz" -o kubebuilder-tools
echo "$(grep -i etag headers.txt -m 1 | cut -d'"' -f2) kubebuilder-tools" > sum
md5sum -c sum
tar -zvxf kubebuilder-tools
sudo mv kubebuilder /usr/local/
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@$(go list -m -f '{{ .Version }}' sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $2, $3}')
ENVTEST_K8S_VERSION=$(go list -m -f '{{ .Version }}' k8s.io/api | awk -F'[v.]' '{printf "1.%d", $3}')
echo "KUBEBUILDER_ASSETS=$(setup-envtest use ${ENVTEST_K8S_VERSION} -p path)" >> $GITHUB_ENV
- name: Run go tests
run: |
go test -short `go list ./... | grep -v ./test_e2e_arc`

2
.gitignore vendored
View File

@@ -34,5 +34,5 @@ bin
# OS
.DS_STORE
/test-assets
/.tools

View File

@@ -12,3 +12,7 @@ linters:
exclusions:
presets:
- std-error-handling
rules:
- linters:
- staticcheck
text: "QF1008:"

20
.mockery.yaml Normal file
View File

@@ -0,0 +1,20 @@
packages:
github.com/actions/actions-runner-controller/github/actions:
config:
inpackage: true
dir: "{{.InterfaceDir}}"
filename: "mocks_test.go"
pkgname: "actions"
interfaces:
ActionsService:
SessionService:
github.com/actions/actions-runner-controller/cmd/ghalistener/metrics:
config:
inpackage: true
dir: "{{.InterfaceDir}}"
filename: "mocks_test.go"
pkgname: "metrics"
interfaces:
Recorder:
ServerExporter:

View File

@@ -102,22 +102,19 @@ A set of example pipelines (./acceptance/pipelines) are provided in this reposit
When raising a PR please run the relevant suites to prove your change hasn't broken anything.
#### Running Ginkgo Tests
You can run the integration test suite that is written in Ginkgo with:
```shell
make test-with-deps
```
This will firstly install a few binaries required to setup the integration test environment and then runs `go test` to start the Ginkgo test.
This will install `setup-envtest`, download the required envtest binaries (etcd, kube-apiserver, kubectl), and then run `go test`.
If you don't want to use `make`, like when you're running tests from your IDE, install required binaries to `/usr/local/kubebuilder/bin`.
That's the directory in which controller-runtime's `envtest` framework locates the binaries.
If you don't want to use `make`, install the envtest binaries using `setup-envtest`:
```shell
sudo mkdir -p /usr/local/kubebuilder/bin
make kube-apiserver etcd
sudo mv test-assets/{etcd,kube-apiserver} /usr/local/kubebuilder/bin/
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
export KUBEBUILDER_ASSETS=$(setup-envtest use -p path)
go test -v -run TestAPIs github.com/actions/actions-runner-controller/controllers/actions.summerwind.net
```

View File

@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.25.1 AS builder
FROM --platform=$BUILDPLATFORM golang:1.25.3 AS builder
WORKDIR /workspace

110
Makefile
View File

@@ -32,21 +32,15 @@ else
GOBIN=$(shell go env GOBIN)
endif
TEST_ASSETS=$(PWD)/test-assets
TOOLS_PATH=$(PWD)/.tools
OS_NAME := $(shell uname -s | tr A-Z a-z)
# The etcd packages that coreos maintain use different extensions for each *nix OS on their github release page.
# ETCD_EXTENSION: the storage format file extension listed on the release page.
# EXTRACT_COMMAND: the appropriate CLI command for extracting this file format.
ifeq ($(OS_NAME), darwin)
ETCD_EXTENSION:=zip
EXTRACT_COMMAND:=unzip
else
ETCD_EXTENSION:=tar.gz
EXTRACT_COMMAND:=tar -xzf
endif
# ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script
ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}')
# ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries
ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}')
ENVTEST ?= $(GOBIN)/setup-envtest
# default list of platforms for which multiarch image is built
ifeq (${PLATFORMS}, )
@@ -72,18 +66,19 @@ lint:
GO_TEST_ARGS ?= -short
# Run tests
test: generate fmt vet manifests shellcheck
go test $(GO_TEST_ARGS) `go list ./... | grep -v ./test_e2e_arc` -coverprofile cover.out
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers/actions.summerwind.net
test-with-deps: kube-apiserver etcd kubectl
# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
TEST_ASSET_KUBE_APISERVER=$(KUBE_APISERVER_BIN) \
TEST_ASSET_ETCD=$(ETCD_BIN) \
TEST_ASSET_KUBECTL=$(KUBECTL_BIN) \
# Run tests
test: generate fmt vet manifests shellcheck setup-envtest
KUBEBUILDER_ASSETS="$$($(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(GOBIN) -p path)" \
go test $(GO_TEST_ARGS) `go list ./... | grep -v ./test_e2e_arc` -coverprofile cover.out
KUBEBUILDER_ASSETS="$$($(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(GOBIN) -p path)" \
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers/actions.summerwind.net
test-with-deps: setup-envtest
KUBEBUILDER_ASSETS="$$($(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(GOBIN) -p path)" \
make test
# Build manager binary
manager: generate fmt vet
go build -o bin/manager main.go
@@ -364,68 +359,29 @@ ifeq (, $(wildcard $(TOOLS_PATH)/shellcheck))
endif
SHELLCHECK=$(TOOLS_PATH)/shellcheck
# find or download etcd
etcd:
ifeq (, $(shell which etcd))
ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
# find or download envtest
envtest:
ifeq (, $(shell which setup-envtest))
ifeq (, $(wildcard $(GOBIN)/setup-envtest))
@{ \
set -xe ;\
INSTALL_TMP_DIR=$$(mktemp -d) ;\
cd $$INSTALL_TMP_DIR ;\
wget https://github.com/coreos/etcd/releases/download/v3.4.22/etcd-v3.4.22-$(OS_NAME)-amd64.$(ETCD_EXTENSION);\
mkdir -p $(TEST_ASSETS) ;\
$(EXTRACT_COMMAND) etcd-v3.4.22-$(OS_NAME)-amd64.$(ETCD_EXTENSION) ;\
mv etcd-v3.4.22-$(OS_NAME)-amd64/etcd $(TEST_ASSETS)/etcd ;\
rm -rf $$INSTALL_TMP_DIR ;\
set -e ;\
ENVTEST_TMP_DIR=$$(mktemp -d) ;\
cd $$ENVTEST_TMP_DIR ;\
go mod init tmp ;\
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION) ;\
rm -rf $$ENVTEST_TMP_DIR ;\
}
ETCD_BIN=$(TEST_ASSETS)/etcd
else
ETCD_BIN=$(TEST_ASSETS)/etcd
endif
ENVTEST=$(GOBIN)/setup-envtest
else
ETCD_BIN=$(shell which etcd)
ENVTEST=$(shell which setup-envtest)
endif
# find or download kube-apiserver
kube-apiserver:
ifeq (, $(shell which kube-apiserver))
ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
@{ \
set -xe ;\
INSTALL_TMP_DIR=$$(mktemp -d) ;\
cd $$INSTALL_TMP_DIR ;\
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mkdir -p $(TEST_ASSETS) ;\
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
rm -rf $$INSTALL_TMP_DIR ;\
.PHONY: setup-envtest
setup-envtest: envtest
@echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..."
@$(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(GOBIN) -p path || { \
echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \
exit 1; \
}
KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
else
KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
endif
else
KUBE_APISERVER_BIN=$(shell which kube-apiserver)
endif
# find or download kubectl
kubectl:
ifeq (, $(shell which kubectl))
ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
@{ \
set -xe ;\
INSTALL_TMP_DIR=$$(mktemp -d) ;\
cd $$INSTALL_TMP_DIR ;\
wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mkdir -p $(TEST_ASSETS) ;\
tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
rm -rf $$INSTALL_TMP_DIR ;\
}
KUBECTL_BIN=$(TEST_ASSETS)/kubectl
else
KUBECTL_BIN=$(TEST_ASSETS)/kubectl
endif
else
KUBECTL_BIN=$(shell which kubectl)
endif

View File

@@ -1,143 +0,0 @@
package app
import (
"context"
"errors"
"fmt"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"golang.org/x/sync/errgroup"
)
// App is responsible for initializing required components and running the app.
type App struct {
// configured fields
config *config.Config // validated listener configuration (see New)
logger logr.Logger // app-scoped logger, propagated to all components
// initialized fields
listener Listener // receives messages from the actions service
worker Worker // reacts to job events / scaling decisions
metrics metrics.ServerExporter // metrics server; nil when MetricsAddr is empty (see New/Run)
}
// Listener is the message-consuming component App drives in Run.
//go:generate mockery --name Listener --output ./mocks --outpkg mocks --case underscore
type Listener interface {
// Listen blocks, delivering received messages to handler until ctx is done
// or an error occurs.
Listen(ctx context.Context, handler listener.Handler) error
}
// Worker handles job lifecycle events delivered by the Listener.
//go:generate mockery --name Worker --output ./mocks --outpkg mocks --case underscore
type Worker interface {
// HandleJobStarted is invoked when a job has started on a runner.
HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
// HandleDesiredRunnerCount reconciles the desired runner count; the
// returned int is presumably the count actually applied — confirm with
// the worker implementation.
HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error)
}
// New validates the provided config and builds a fully wired App:
// logger, actions client, optional metrics exporter, kubernetes worker,
// and the listener itself. It returns an error as soon as any step fails.
func New(config config.Config) (*App, error) {
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("failed to validate config: %w", err)
}
// Keep a copy of the config on the app; the remaining setup reads from it.
app := &App{
config: &config,
}
// Derive enterprise/organization/repository from the configure URL; these
// are only used to label the metrics exporter below.
ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse GitHub config from URL: %w", err)
}
// Scoped block keeps the intermediate logger variable from leaking into
// the rest of the function.
{
logger, err := config.Logger()
if err != nil {
return nil, fmt.Errorf("failed to create logger: %w", err)
}
app.logger = logger.WithName("listener-app")
}
actionsClient, err := config.ActionsClient(app.logger)
if err != nil {
return nil, fmt.Errorf("failed to create actions client: %w", err)
}
// Metrics are optional: the exporter is only created when an address is
// configured, so app.metrics stays nil otherwise.
if config.MetricsAddr != "" {
app.metrics = metrics.NewExporter(metrics.ExporterConfig{
ScaleSetName: config.EphemeralRunnerSetName,
ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
Enterprise: ghConfig.Enterprise,
Organization: ghConfig.Organization,
Repository: ghConfig.Repository,
ServerAddr: config.MetricsAddr,
ServerEndpoint: config.MetricsEndpoint,
Metrics: config.Metrics,
Logger: app.logger.WithName("metrics exporter"),
})
}
// The worker applies scaling decisions to the ephemeral runner set.
worker, err := worker.New(
worker.Config{
EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
EphemeralRunnerSetName: config.EphemeralRunnerSetName,
MaxRunners: config.MaxRunners,
MinRunners: config.MinRunners,
},
worker.WithLogger(app.logger.WithName("worker")),
)
if err != nil {
return nil, fmt.Errorf("failed to create new kubernetes worker: %w", err)
}
app.worker = worker
// app.metrics may still be nil here; listener.New substitutes a discard
// publisher in that case.
listener, err := listener.New(listener.Config{
Client: actionsClient,
ScaleSetID: app.config.RunnerScaleSetId,
MinRunners: app.config.MinRunners,
MaxRunners: app.config.MaxRunners,
Logger: app.logger.WithName("listener"),
Metrics: app.metrics,
})
if err != nil {
return nil, fmt.Errorf("failed to create new listener: %w", err)
}
app.listener = listener
app.logger.Info("app initialized")
return app, nil
}
// Run starts the listener (and, when configured, the metrics server) and
// blocks until the first component returns; the first non-nil error from
// any component is returned.
//
// Both the worker and the listener must have been initialized (normally
// via New); otherwise Run fails immediately without starting anything.
func (app *App) Run(ctx context.Context) error {
	var initErrs []error
	if app.worker == nil {
		initErrs = append(initErrs, fmt.Errorf("worker not initialized"))
	}
	if app.listener == nil {
		initErrs = append(initErrs, fmt.Errorf("listener not initialized"))
	}
	if joined := errors.Join(initErrs...); joined != nil {
		return fmt.Errorf("app not initialized: %w", joined)
	}

	group, ctx := errgroup.WithContext(ctx)

	// Give the metrics server its own cancellable context so it is shut
	// down (with the listener's exit recorded as the cause) as soon as
	// the listener stops.
	metricsCtx, cancelMetrics := context.WithCancelCause(ctx)

	group.Go(func() error {
		app.logger.Info("Starting listener")
		listenerErr := app.listener.Listen(ctx, app.worker)
		cancelMetrics(fmt.Errorf("Listener exited: %w", listenerErr))
		return listenerErr
	})

	if app.metrics != nil {
		group.Go(func() error {
			app.logger.Info("Starting metrics server")
			return app.metrics.ListenAndServe(metricsCtx)
		})
	}

	return group.Wait()
}

View File

@@ -1,85 +0,0 @@
package app
import (
"context"
"errors"
"testing"
appmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/app/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
metricsMocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// TestApp_Run exercises App.Run's startup guards and shutdown behavior
// using mockery-generated Listener/Worker/metrics mocks.
func TestApp_Run(t *testing.T) {
t.Parallel()
// Run must refuse to start when either the listener or the worker is missing.
t.Run("ListenerWorkerGuard", func(t *testing.T) {
invalidApps := []*App{
{},
{worker: &worker.Worker{}},
{listener: &listener.Listener{}},
}
for _, app := range invalidApps {
assert.Error(t, app.Run(context.Background()))
}
})
// A listener failure must propagate out of Run.
t.Run("ExitsOnListenerError", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
listener.On("Listen", mock.Anything, mock.Anything).Return(errors.New("listener error")).Once()
app := &App{
listener: listener,
worker: worker,
}
err := app.Run(context.Background())
assert.Error(t, err)
})
// NOTE(review): despite the name, this subtest verifies that Run returns
// nil when the listener exits cleanly (returns a nil error) — consider
// renaming to something like "ExitsCleanlyOnListenerNilError".
t.Run("ExitsOnListenerNil", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
listener.On("Listen", mock.Anything, mock.Anything).Return(nil).Once()
app := &App{
listener: listener,
worker: worker,
}
err := app.Run(context.Background())
assert.NoError(t, err)
})
// A metrics-server error must surface from Run; the listener observes its
// context being cancelled.
t.Run("CancelListenerOnMetricsServerError", func(t *testing.T) {
listener := appmocks.NewListener(t)
worker := appmocks.NewWorker(t)
metrics := metricsMocks.NewServerPublisher(t)
ctx := context.Background()
listener.On("Listen", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
ctx := args.Get(0).(context.Context)
go func() {
<-ctx.Done()
}()
}).Return(nil).Once()
metrics.On("ListenAndServe", mock.Anything).Return(errors.New("metrics server error")).Once()
app := &App{
listener: listener,
worker: worker,
metrics: metrics,
}
err := app.Run(ctx)
assert.Error(t, err)
})
}

View File

@@ -1,43 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
mock "github.com/stretchr/testify/mock"
)
// Listener is an autogenerated mock type for the Listener type
type Listener struct {
mock.Mock
}
// Listen provides a mock function with given fields: ctx, handler
func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
ret := _m.Called(ctx, handler)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, listener.Handler) error); ok {
r0 = rf(ctx, handler)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewListener creates a new instance of Listener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewListener(t interface {
mock.TestingT
Cleanup(func())
}) *Listener {
mock := &Listener{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,68 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
actions "github.com/actions/actions-runner-controller/github/actions"
context "context"
mock "github.com/stretchr/testify/mock"
)
// Worker is an autogenerated mock type for the Worker type
type Worker struct {
mock.Mock
}
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
ret := _m.Called(ctx, count, acquireCount)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
return rf(ctx, count, acquireCount)
}
if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
r0 = rf(ctx, count, acquireCount)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
r1 = rf(ctx, count, acquireCount)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ret := _m.Called(ctx, jobInfo)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
r0 = rf(ctx, jobInfo)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewWorker creates a new instance of Worker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewWorker(t interface {
mock.TestingT
Cleanup(func())
}) *Worker {
mock := &Worker{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -5,21 +5,23 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"net/url"
"os"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/actions/actions-runner-controller/vault"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
"github.com/go-logr/logr"
"github.com/actions/scaleset"
"golang.org/x/net/http/httpproxy"
)
const appName = "ghalistener"
type Config struct {
ConfigureUrl string `json:"configure_url"`
VaultType vault.VaultType `json:"vault_type"`
@@ -34,7 +36,7 @@ type Config struct {
EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"`
MaxRunners int `json:"max_runners"`
MinRunners int `json:"min_runners"`
RunnerScaleSetId int `json:"runner_scale_set_id"`
RunnerScaleSetID int `json:"runner_scale_set_id"`
RunnerScaleSetName string `json:"runner_scale_set_name"`
ServerRootCA string `json:"server_root_ca"`
LogLevel string `json:"log_level"`
@@ -108,8 +110,8 @@ func (c *Config) Validate() error {
return fmt.Errorf("EphemeralRunnerSetNamespace %q or EphemeralRunnerSetName %q is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
}
if c.RunnerScaleSetId == 0 {
return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetId)
if c.RunnerScaleSetID == 0 {
return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetID)
}
if c.MaxRunners < c.MinRunners {
@@ -134,40 +136,51 @@ func (c *Config) Validate() error {
return nil
}
func (c *Config) Logger() (logr.Logger, error) {
logLevel := string(logging.LogLevelDebug)
if c.LogLevel != "" {
logLevel = c.LogLevel
func (c *Config) Logger() (*slog.Logger, error) {
var lvl slog.Level
switch strings.ToLower(c.LogLevel) {
case "debug":
lvl = slog.LevelDebug
case "info":
lvl = slog.LevelInfo
case "warn":
lvl = slog.LevelWarn
case "error":
lvl = slog.LevelError
default:
return nil, fmt.Errorf("invalid log level: %s", c.LogLevel)
}
logFormat := string(logging.LogFormatText)
if c.LogFormat != "" {
logFormat = c.LogFormat
var logger *slog.Logger
switch c.LogFormat {
case "json":
logger = slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
AddSource: true,
Level: lvl,
}))
case "text":
logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
AddSource: true,
Level: lvl,
}))
default:
return nil, fmt.Errorf("invalid log format: %s", c.LogFormat)
}
logger, err := logging.NewLogger(logLevel, logFormat)
if err != nil {
return logr.Logger{}, fmt.Errorf("NewLogger failed: %w", err)
}
return logger, nil
return logger.With("app", appName), nil
}
func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.ClientOption) (*actions.Client, error) {
var creds actions.ActionsAuth
switch c.Token {
case "":
creds.AppCreds = &actions.GitHubAppAuth{
AppID: c.AppID,
AppInstallationID: c.AppInstallationID,
AppPrivateKey: c.AppPrivateKey,
}
default:
creds.Token = c.Token
func (c *Config) ActionsClient(logger *slog.Logger, clientOptions ...scaleset.HTTPOption) (*scaleset.Client, error) {
systemInfo := scaleset.SystemInfo{
System: "actions-runner-controller",
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: c.RunnerScaleSetID,
Subsystem: appName,
}
options := append([]actions.ClientOption{
actions.WithLogger(logger),
options := append([]scaleset.HTTPOption{
scaleset.WithLogger(logger),
}, clientOptions...)
if c.ServerRootCA != "" {
@@ -181,31 +194,47 @@ func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.Clie
return nil, fmt.Errorf("failed to parse root certificate")
}
options = append(options, actions.WithRootCAs(pool))
options = append(options, scaleset.WithRootCAs(pool))
}
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) {
options = append(options, scaleset.WithProxy(func(req *http.Request) (*url.URL, error) {
return proxyFunc(req.URL)
}))
client, err := actions.NewClient(c.ConfigureUrl, &creds, options...)
if err != nil {
return nil, fmt.Errorf("failed to create actions client: %w", err)
var client *scaleset.Client
switch c.Token {
case "":
c, err := scaleset.NewClientWithGitHubApp(
scaleset.ClientWithGitHubAppConfig{
GitHubConfigURL: c.ConfigureUrl,
GitHubAppAuth: scaleset.GitHubAppAuth{
ClientID: c.AppConfig.AppID,
InstallationID: c.AppConfig.AppInstallationID,
PrivateKey: c.AppConfig.AppPrivateKey,
},
SystemInfo: systemInfo,
},
options...,
)
if err != nil {
return nil, fmt.Errorf("failed to instantiate client with GitHub App auth: %w", err)
}
client = c
default:
c, err := scaleset.NewClientWithPersonalAccessToken(
scaleset.NewClientWithPersonalAccessTokenConfig{
GitHubConfigURL: c.ConfigureUrl,
PersonalAccessToken: c.Token,
SystemInfo: systemInfo,
},
options...,
)
if err != nil {
return nil, fmt.Errorf("failed to instantiate client with PAT auth: %w", err)
}
client = c
}
client.SetUserAgent(actions.UserAgentInfo{
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: c.RunnerScaleSetId,
HasProxy: hasProxy(),
Subsystem: "ghalistener",
})
return client, nil
}
// hasProxy reports whether an outbound proxy is configured through the
// standard environment variables (HTTP_PROXY/HTTPS_PROXY and their
// lowercase variants), as parsed by httpproxy.FromEnvironment.
//
// Bug fix: the previous implementation compared ProxyFunc() against nil,
// but httpproxy.Config.ProxyFunc always returns a non-nil function
// regardless of the environment, so hasProxy unconditionally reported
// true. Inspecting the parsed configuration fields gives the intended
// answer.
func hasProxy() bool {
	cfg := httpproxy.FromEnvironment()
	return cfg.HTTPProxy != "" || cfg.HTTPSProxy != ""
}

View File

@@ -3,21 +3,23 @@ package config_test
import (
"context"
"crypto/tls"
"encoding/json"
"log/slog"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/testserver"
"github.com/go-logr/logr"
"github.com/actions/scaleset"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var discardLogger = slog.New(slog.DiscardHandler)
func TestCustomerServerRootCA(t *testing.T) {
ctx := context.Background()
certsFolder := filepath.Join(
@@ -59,7 +61,7 @@ func TestCustomerServerRootCA(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard())
client, err := config.ActionsClient(discardLogger)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, "test")
require.NoError(t, err)
@@ -67,18 +69,19 @@ func TestCustomerServerRootCA(t *testing.T) {
}
func TestProxySettings(t *testing.T) {
assertHasProxy := func(t *testing.T, debugInfo string, want bool) {
type debugInfoContent struct {
HasProxy bool `json:"has_proxy"`
}
var got debugInfoContent
err := json.Unmarshal([]byte(debugInfo), &got)
require.NoError(t, err)
assert.Equal(t, want, got.HasProxy)
}
t.Run("http", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
os.Setenv("http_proxy", "http://proxy:8080")
defer os.Setenv("http_proxy", prevProxy)
config := config.Config{
@@ -88,29 +91,15 @@ func TestProxySettings(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard())
client, err := config.ActionsClient(discardLogger)
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.True(t, wentThroughProxy)
assertHasProxy(t, client.DebugInfo(), true)
})
t.Run("https", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("https_proxy")
os.Setenv("https_proxy", proxy.URL)
os.Setenv("https_proxy", "https://proxy:443")
defer os.Setenv("https_proxy", prevProxy)
config := config.Config{
@@ -120,32 +109,16 @@ func TestProxySettings(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
client, err := config.ActionsClient(
discardLogger,
scaleset.WithRetryMax(0),
)
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
// proxy doesn't support https
assert.Error(t, err)
assert.True(t, wentThroughProxy)
assertHasProxy(t, client.DebugInfo(), true)
})
t.Run("no_proxy", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
defer os.Setenv("http_proxy", prevProxy)
prevNoProxy := os.Getenv("no_proxy")
os.Setenv("no_proxy", "example.com")
defer os.Setenv("no_proxy", prevNoProxy)
@@ -157,14 +130,9 @@ func TestProxySettings(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard())
client, err := config.ActionsClient(discardLogger)
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.False(t, wentThroughProxy)
assertHasProxy(t, client.DebugInfo(), true)
})
}

View File

@@ -13,7 +13,7 @@ func TestConfigValidationMinMax(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 5,
MaxRunners: 2,
AppConfig: &appconfig.AppConfig{
@@ -29,7 +29,7 @@ func TestConfigValidationMissingToken(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: missing app config"
@@ -49,7 +49,7 @@ func TestConfigValidationAppKey(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
@@ -66,7 +66,7 @@ func TestConfigValidationAppKey(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
@@ -85,7 +85,7 @@ func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: both PAT and GitHub App credentials provided. should only provide one"
@@ -97,7 +97,7 @@ func TestConfigValidation(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
AppConfig: &appconfig.AppConfig{
@@ -114,7 +114,7 @@ func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
@@ -128,7 +128,7 @@ func TestConfigValidationWithVaultConfig(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,
@@ -143,7 +143,7 @@ func TestConfigValidationWithVaultConfig(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultType("invalid_vault_type"),
@@ -158,7 +158,7 @@ func TestConfigValidationWithVaultConfig(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,

View File

@@ -1,458 +0,0 @@
package listener
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"time"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/google/uuid"
)
const (
sessionCreationMaxRetries = 10
)
// message types
const (
messageTypeJobAvailable = "JobAvailable"
messageTypeJobAssigned = "JobAssigned"
messageTypeJobStarted = "JobStarted"
messageTypeJobCompleted = "JobCompleted"
)
//go:generate mockery --name Client --output ./mocks --outpkg mocks --case underscore
type Client interface {
GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error)
CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error)
GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error)
DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error)
DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error
}
// Config carries the dependencies and scaling bounds for a Listener.
type Config struct {
	Client     Client // actions service client used for all API calls (required)
	ScaleSetID int    // runner scale set the listener serves (required, non-zero)
	MinRunners int    // lower bound on runners; must be >= 0
	MaxRunners int    // upper bound on runners; must be >= 0 (0 appears to mean "no cap" — Validate only enforces min<=max when MaxRunners > 0)
	Logger     logr.Logger
	Metrics    metrics.Publisher // optional; New falls back to metrics.Discard when nil
}
// Validate reports the first constraint the configuration violates, or nil
// when the configuration is usable: a client must be set, the scale set ID
// must be non-zero, runner counts must be non-negative, and MinRunners may
// not exceed a non-zero MaxRunners.
func (c *Config) Validate() error {
	switch {
	case c.Client == nil:
		return errors.New("client is required")
	case c.ScaleSetID == 0:
		return errors.New("scaleSetID is required")
	case c.MinRunners < 0:
		return errors.New("minRunners must be greater than or equal to 0")
	case c.MaxRunners < 0:
		return errors.New("maxRunners must be greater than or equal to 0")
	case c.MaxRunners > 0 && c.MinRunners > c.MaxRunners:
		return errors.New("minRunners must be less than or equal to maxRunners")
	default:
		return nil
	}
}
// The Listener's role is to manage all interactions with the actions service.
// It receives messages and processes them using the given handler.
type Listener struct {
	// configured fields
	scaleSetID int               // The ID of the scale set associated with the listener.
	client     Client            // The client used to interact with the scale set.
	metrics    metrics.Publisher // The publisher used to publish metrics.

	// internal fields
	logger   logr.Logger // The logger used for logging.
	hostname string      // The hostname of the listener; passed as the session owner name.

	// updated fields
	lastMessageID int64                          // The ID of the last processed message.
	maxCapacity   int                            // Passed to GetMessage as the queue's max capacity (set from Config.MaxRunners).
	session       *actions.RunnerScaleSetSession // The session for managing the runner scale set.
}
// New validates config and builds a Listener from it.
//
// The metrics publisher defaults to metrics.Discard when none is supplied,
// and the static min/max runner bounds are published immediately. The
// listener identifies itself by hostname, falling back to a random UUID
// when the hostname cannot be determined.
func New(config Config) (*Listener, error) {
	if err := config.Validate(); err != nil {
		return nil, fmt.Errorf("invalid config: %w", err)
	}

	publisher := metrics.Publisher(metrics.Discard)
	if config.Metrics != nil {
		publisher = config.Metrics
	}

	l := &Listener{
		scaleSetID:  config.ScaleSetID,
		client:      config.Client,
		logger:      config.Logger,
		metrics:     publisher,
		maxCapacity: config.MaxRunners,
	}

	l.metrics.PublishStatic(config.MinRunners, config.MaxRunners)

	host, err := os.Hostname()
	if err != nil {
		host = uuid.NewString()
		l.logger.Info("Failed to get hostname, fallback to uuid", "uuid", host, "error", err)
	}
	l.hostname = host

	return l, nil
}
//go:generate mockery --name Handler --output ./mocks --outpkg mocks --case underscore

// Handler reacts to the events the listener extracts from the message stream.
type Handler interface {
	// HandleJobStarted is invoked once per JobStarted event in a batch.
	HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
	// HandleDesiredRunnerCount receives the current number of assigned jobs
	// and the number of jobs completed in the latest batch, and returns the
	// desired runner count it settled on (published as a metric by callers).
	HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error)
}
// Listen listens for incoming messages and handles them using the provided handler.
// It continuously listens for messages until the context is cancelled.
// The initial message contains the current statistics and acquirable jobs, if any.
// The handler is responsible for handling the initial message and subsequent messages.
// If an error occurs during any step, Listen returns an error.
func (l *Listener) Listen(ctx context.Context, handler Handler) error {
	if err := l.createSession(ctx); err != nil {
		return fmt.Errorf("createSession failed: %w", err)
	}

	// Always tear down the message session on exit; deleteMessageSession
	// uses its own 30s timeout, independent of ctx.
	defer func() {
		if err := l.deleteMessageSession(); err != nil {
			l.logger.Error(err, "failed to delete message session")
		}
	}()

	// Synthesize an initial message from the session statistics so the
	// handler reconciles the desired runner count before any real message
	// arrives.
	initialMessage := &actions.RunnerScaleSetMessage{
		MessageId:   0,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics:  l.session.Statistics,
		Body:        "",
	}

	if l.session.Statistics == nil {
		return fmt.Errorf("session statistics is nil")
	}
	l.metrics.PublishStatistics(initialMessage.Statistics)

	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs, 0)
	if err != nil {
		return fmt.Errorf("handling initial message failed: %w", err)
	}
	l.metrics.PublishDesiredRunners(desiredRunners)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		msg, err := l.getMessage(ctx)
		if err != nil {
			return fmt.Errorf("failed to get message: %w", err)
		}

		// A nil message means nothing new arrived; still give the handler
		// a chance to reconcile the desired runner count.
		if msg == nil {
			_, err := handler.HandleDesiredRunnerCount(ctx, 0, 0)
			if err != nil {
				return fmt.Errorf("handling nil message failed: %w", err)
			}
			continue
		}

		// Remove cancellation from the context to avoid cancelling the message handling.
		if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil {
			return fmt.Errorf("failed to handle message: %w", err)
		}
	}
}
// handleMessage processes a single scale-set message end to end: parse the
// batch, acquire any available jobs, publish completed-job metrics,
// acknowledge the message, notify the handler of started jobs, and finally
// reconcile the desired runner count. Note the ordering: the message is
// acknowledged (deleted) BEFORE started-job handling and reconciliation.
func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
	parsedMsg, err := l.parseMessage(ctx, msg)
	if err != nil {
		return fmt.Errorf("failed to parse message: %w", err)
	}

	l.metrics.PublishStatistics(parsedMsg.statistics)

	if len(parsedMsg.jobsAvailable) > 0 {
		acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
		if err != nil {
			return fmt.Errorf("failed to acquire jobs: %w", err)
		}
		l.logger.Info("Jobs are acquired", "count", len(acquiredJobIDs), "requestIds", fmt.Sprint(acquiredJobIDs))
	}

	for _, jobCompleted := range parsedMsg.jobsCompleted {
		l.metrics.PublishJobCompleted(jobCompleted)
	}

	// Record progress, then acknowledge the message under the new ID.
	l.lastMessageID = msg.MessageId

	if err := l.deleteLastMessage(ctx); err != nil {
		return fmt.Errorf("failed to delete message: %w", err)
	}

	for _, jobStarted := range parsedMsg.jobsStarted {
		if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
			return fmt.Errorf("failed to handle job started: %w", err)
		}
		l.metrics.PublishJobStarted(jobStarted)
	}

	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs, len(parsedMsg.jobsCompleted))
	if err != nil {
		return fmt.Errorf("failed to handle desired runner count: %w", err)
	}
	l.metrics.PublishDesiredRunners(desiredRunners)

	return nil
}
// createSession establishes a message session for this scale set and stores
// it on the listener.
//
// Only HTTP 409 Conflict client-side errors are retried (every 30 seconds,
// up to sessionCreationMaxRetries attempts); any other error — or context
// cancellation while waiting — aborts immediately.
func (l *Listener) createSession(ctx context.Context) error {
	var session *actions.RunnerScaleSetSession
	var retries int

	for {
		var err error
		session, err = l.client.CreateMessageSession(ctx, l.scaleSetID, l.hostname)
		if err == nil {
			break
		}

		// Non-client-side errors are fatal.
		clientErr := &actions.HttpClientSideError{}
		if !errors.As(err, &clientErr) {
			return fmt.Errorf("failed to create session: %w", err)
		}

		// Of the client-side errors, only a 409 Conflict is retryable.
		if clientErr.Code != http.StatusConflict {
			return fmt.Errorf("failed to create session: %w", err)
		}

		retries++
		if retries >= sessionCreationMaxRetries {
			return fmt.Errorf("failed to create session after %d retries: %w", retries, err)
		}

		l.logger.Info("Unable to create message session. Will try again in 30 seconds", "error", err.Error())

		select {
		case <-ctx.Done():
			return fmt.Errorf("context cancelled: %w", ctx.Err())
		case <-time.After(30 * time.Second):
		}
	}

	// Log the statistics snapshot that came back with the new session.
	statistics, err := json.Marshal(session.Statistics)
	if err != nil {
		return fmt.Errorf("failed to marshal statistics: %w", err)
	}
	l.logger.Info("Current runner scale set statistics.", "statistics", string(statistics))

	l.session = session
	return nil
}
// getMessage fetches the next message after lastMessageID from the session's
// queue. If the queue token has expired, the session is refreshed once and
// the fetch retried; any other error is returned as-is (wrapped).
func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
	l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
	msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
	if err == nil { // if NO error
		return msg, nil
	}

	// Only a token-expiry error is recoverable here.
	expiredError := &actions.MessageQueueTokenExpiredError{}
	if !errors.As(err, &expiredError) {
		return nil, fmt.Errorf("failed to get next message: %w", err)
	}

	if err := l.refreshSession(ctx); err != nil {
		return nil, err
	}

	l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)

	msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
	if err != nil { // the retry after the session refresh failed as well
		return nil, fmt.Errorf("failed to get next message after message session refresh: %w", err)
	}
	return msg, nil
}
// deleteLastMessage acknowledges the most recently processed message
// (lastMessageID) by removing it from the queue. A token-expiry error
// triggers one session refresh followed by a single retry; all other
// errors are returned wrapped.
func (l *Listener) deleteLastMessage(ctx context.Context) error {
	l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)

	firstErr := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
	if firstErr == nil {
		return nil
	}

	// Anything other than an expired queue token is fatal.
	var expired *actions.MessageQueueTokenExpiredError
	if !errors.As(firstErr, &expired) {
		return fmt.Errorf("failed to delete last message: %w", firstErr)
	}

	if err := l.refreshSession(ctx); err != nil {
		return err
	}

	if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil {
		return fmt.Errorf("failed to delete last message after message session refresh: %w", err)
	}
	return nil
}
// parsedMessage is the decoded contents of a "RunnerScaleSetJobMessages"
// batch: the statistics snapshot plus job events grouped by type.
// JobAssigned events are logged during parsing but intentionally not
// retained here (see parseMessage).
type parsedMessage struct {
	statistics    *actions.RunnerScaleSetStatistic
	jobsStarted   []*actions.JobStarted
	jobsAvailable []*actions.JobAvailable
	jobsCompleted []*actions.JobCompleted
}
// parseMessage decodes a "RunnerScaleSetJobMessages" message into a
// parsedMessage. The message body, when non-empty, is a JSON array of
// job events; each is dispatched on its MessageType field. Unknown
// message types are logged and skipped; JobAssigned events are logged
// but not collected. Messages of any other top-level type, or with nil
// statistics, are rejected.
func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
	if msg.MessageType != "RunnerScaleSetJobMessages" {
		l.logger.Info("Skipping message", "messageType", msg.MessageType)
		return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
	}

	l.logger.Info("Processing message", "messageId", msg.MessageId, "messageType", msg.MessageType)
	if msg.Statistics == nil {
		return nil, fmt.Errorf("invalid message: statistics is nil")
	}

	l.logger.Info("New runner scale set statistics.", "statistics", msg.Statistics)

	// The body is a JSON array of raw job messages; decode it lazily so
	// each element can be re-decoded by its concrete type below.
	var batchedMessages []json.RawMessage
	if len(msg.Body) > 0 {
		if err := json.Unmarshal([]byte(msg.Body), &batchedMessages); err != nil {
			return nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
		}
	}

	parsedMsg := &parsedMessage{
		statistics: msg.Statistics,
	}

	for _, msg := range batchedMessages {
		// First decode only the type discriminator, then the full event.
		var messageType actions.JobMessageType
		if err := json.Unmarshal(msg, &messageType); err != nil {
			return nil, fmt.Errorf("failed to decode job message type: %w", err)
		}

		switch messageType.MessageType {
		case messageTypeJobAvailable:
			var jobAvailable actions.JobAvailable
			if err := json.Unmarshal(msg, &jobAvailable); err != nil {
				return nil, fmt.Errorf("failed to decode job available: %w", err)
			}
			l.logger.Info("Job available message received", "jobId", jobAvailable.JobID)
			parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)

		case messageTypeJobAssigned:
			// Assigned jobs are only logged; nothing downstream consumes them.
			var jobAssigned actions.JobAssigned
			if err := json.Unmarshal(msg, &jobAssigned); err != nil {
				return nil, fmt.Errorf("failed to decode job assigned: %w", err)
			}
			l.logger.Info("Job assigned message received", "jobId", jobAssigned.JobID)

		case messageTypeJobStarted:
			var jobStarted actions.JobStarted
			if err := json.Unmarshal(msg, &jobStarted); err != nil {
				return nil, fmt.Errorf("could not decode job started message. %w", err)
			}
			l.logger.Info("Job started message received.", "JobID", jobStarted.JobID, "RunnerId", jobStarted.RunnerID)
			parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)

		case messageTypeJobCompleted:
			var jobCompleted actions.JobCompleted
			if err := json.Unmarshal(msg, &jobCompleted); err != nil {
				return nil, fmt.Errorf("failed to decode job completed: %w", err)
			}
			l.logger.Info(
				"Job completed message received.",
				"JobID", jobCompleted.JobID,
				"Result", jobCompleted.Result,
				"RunnerId", jobCompleted.RunnerId,
				"RunnerName", jobCompleted.RunnerName,
			)
			parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)

		default:
			l.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
		}
	}

	return parsedMsg, nil
}
// acquireAvailableJobs claims the runner request IDs of the given available
// jobs via the actions service and returns the IDs actually acquired.
// A queue-token-expiry error triggers one session refresh and a single
// retry; any other error is returned wrapped.
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
	requestIDs := make([]int64, 0, len(jobsAvailable))
	for _, job := range jobsAvailable {
		requestIDs = append(requestIDs, job.RunnerRequestID)
	}

	l.logger.Info("Acquiring jobs", "count", len(requestIDs), "requestIds", fmt.Sprint(requestIDs))

	acquired, firstErr := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, requestIDs)
	if firstErr == nil {
		return acquired, nil
	}

	// Retry only when the queue token expired; everything else is fatal.
	var expired *actions.MessageQueueTokenExpiredError
	if !errors.As(firstErr, &expired) {
		return nil, fmt.Errorf("failed to acquire jobs: %w", firstErr)
	}

	if err := l.refreshSession(ctx); err != nil {
		return nil, err
	}

	acquired, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, requestIDs)
	if err != nil {
		return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
	}
	return acquired, nil
}
// refreshSession swaps the current message session for a freshly refreshed
// one from the actions service; called after the queue token has expired.
// On failure the old session is left in place and an error is returned.
func (l *Listener) refreshSession(ctx context.Context) error {
	l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")

	refreshed, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
	if err != nil {
		return fmt.Errorf("refresh message session failed. %w", err)
	}

	l.session = refreshed
	return nil
}
// deleteMessageSession closes the active message session, bounded by a
// fresh 30-second timeout that is independent of any (possibly already
// cancelled) caller context — it runs from Listen's deferred cleanup.
func (l *Listener) deleteMessageSession() error {
	l.logger.Info("Deleting message session")

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	err := l.client.DeleteMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
	if err != nil {
		return fmt.Errorf("failed to delete message session: %w", err)
	}
	return nil
}

View File

@@ -1,970 +0,0 @@
package listener
import (
"context"
"encoding/json"
"errors"
"net/http"
"testing"
"time"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestNew verifies Listener construction: a zero-value config is rejected
// by validation, while a config with a client and non-zero scale set ID
// yields a listener.
func TestNew(t *testing.T) {
	t.Parallel()

	t.Run("InvalidConfig", func(t *testing.T) {
		t.Parallel()
		// Zero value: no client, no scale set ID — Validate must fail.
		var config Config
		_, err := New(config)
		assert.NotNil(t, err)
	})

	t.Run("ValidConfig", func(t *testing.T) {
		t.Parallel()
		config := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		l, err := New(config)
		assert.Nil(t, err)
		assert.NotNil(t, l)
	})
}
// TestListener_createSession covers session creation: a non-conflict error
// fails immediately, a 409 Conflict enters the 30s retry wait until the
// context deadline fires, and a successful call stores the session.
func TestListener_createSession(t *testing.T) {
	t.Parallel()

	t.Run("FailOnce", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		// An arbitrary (non-HttpClientSideError) failure is not retried.
		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		err = l.createSession(ctx)
		assert.NotNil(t, err)
	})

	t.Run("FailContext", func(t *testing.T) {
		t.Parallel()
		// 409 Conflict puts createSession into its 30s retry sleep; the 2s
		// context deadline fires first, so the error is DeadlineExceeded.
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil,
			&actions.HttpClientSideError{Code: http.StatusConflict}).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		err = l.createSession(ctx)
		assert.True(t, errors.Is(err, context.DeadlineExceeded))
	})

	t.Run("SetsSession", func(t *testing.T) {
		t.Parallel()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		err = l.createSession(context.Background())
		assert.Nil(t, err)
		// The returned session must be stored on the listener.
		assert.Equal(t, session, l.session)
	})
}
// TestListener_getMessage covers message fetching: plain success, a
// non-expiry error failing immediately, and the refresh-then-retry path
// on MessageQueueTokenExpiredError (succeeding and failing).
func TestListener_getMessage(t *testing.T) {
	t.Parallel()

	t.Run("ReceivesMessage", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		want := &actions.RunnerScaleSetMessage{
			MessageId: 1,
		}
		// MaxRunners (10) is forwarded as GetMessage's maxCapacity argument.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{}
		got, err := l.getMessage(ctx)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})

	t.Run("NotExpiredError", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		// A client-side error that is NOT a token expiry must not trigger
		// a session refresh; getMessage fails immediately.
		client := listenermocks.NewClient(t)
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{}
		_, err = l.getMessage(ctx)
		assert.NotNil(t, err)
	})

	t.Run("RefreshAndSucceeds", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// First fetch expires the token; the retry after refresh succeeds.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
		want := &actions.RunnerScaleSetMessage{
			MessageId: 1,
		}
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		got, err := l.getMessage(ctx)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})

	t.Run("RefreshAndFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// Token expires on both the first fetch and the post-refresh retry:
		// getMessage must give up after the single retry.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		got, err := l.getMessage(ctx)
		assert.NotNil(t, err)
		assert.Nil(t, got)
	})
}
// TestListener_refreshSession verifies that a successful refresh replaces
// the stored session, and that a failed refresh leaves it untouched.
func TestListener_refreshSession(t *testing.T) {
	t.Parallel()

	t.Run("SuccessfullyRefreshes", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		newUUID := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &newUUID,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		oldUUID := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		err = l.refreshSession(ctx)
		assert.Nil(t, err)
		// The refreshed session replaces the old one.
		assert.Equal(t, session, l.session)
	})

	t.Run("FailsToRefresh", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, errors.New("error")).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		oldUUID := uuid.New()
		oldSession := &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		l.session = oldSession

		err = l.refreshSession(ctx)
		assert.NotNil(t, err)
		// On failure the previous session must be kept.
		assert.Equal(t, oldSession, l.session)
	})
}
// TestListener_deleteLastMessage covers message acknowledgement: plain
// success (with the stored lastMessageID forwarded), plain failure, and
// the refresh-then-retry path on token expiry (succeeding and failing).
func TestListener_deleteLastMessage(t *testing.T) {
	t.Parallel()

	t.Run("SuccessfullyDeletes", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		// Assert that the listener's lastMessageID (5) is what gets deleted.
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
			return lastMessageID.(int64) == int64(5)
		})).Return(nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{}
		l.lastMessageID = 5

		err = l.deleteLastMessage(ctx)
		assert.Nil(t, err)
	})

	t.Run("FailsToDelete", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("error")).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{}
		l.lastMessageID = 5

		err = l.deleteLastMessage(ctx)
		assert.NotNil(t, err)
	})

	t.Run("RefreshAndSucceeds", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		newUUID := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &newUUID,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// First delete hits token expiry; the retry after refresh succeeds.
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Once()
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
			return lastMessageID.(int64) == int64(5)
		})).Return(nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		oldUUID := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		l.lastMessageID = 5

		config.Client = client // NOTE(review): redundant — client was assigned above and New was already called.

		err = l.deleteLastMessage(ctx)
		assert.NoError(t, err)
	})

	t.Run("RefreshAndFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		newUUID := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &newUUID,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// Token expiry on both attempts: deleteLastMessage must give up.
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Twice()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		oldUUID := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		l.lastMessageID = 5

		config.Client = client // NOTE(review): redundant — client was assigned above and New was already called.

		err = l.deleteLastMessage(ctx)
		assert.Error(t, err)
	})
}
// TestListener_Listen covers the top-level Listen loop: a session-creation
// failure surfaces immediately; the handler is invoked for the synthetic
// initial message even before any queue message arrives; and a context
// cancelled after GetMessage still lets message handling (including the
// uncancellable DeleteMessage) complete before Listen returns Canceled.
//
// Fix: assert.ErrorIs takes (t, err, target) — the final assertion had the
// error and target arguments swapped, which only passed by coincidence
// because Listen returns the unwrapped context.Canceled sentinel.
func TestListener_Listen(t *testing.T) {
	t.Parallel()

	t.Run("CreateSessionFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		err = l.Listen(ctx, nil)
		assert.NotNil(t, err)
	})

	t.Run("CallHandleRegardlessOfInitialMessage", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithCancel(context.Background())
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		// The handler must be called for the synthetic initial message;
		// cancelling inside the callback stops the loop right after.
		var called bool
		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Run(
				func(mock.Arguments) {
					called = true
					cancel()
				},
			).
			Once()

		err = l.Listen(ctx, handler)
		assert.True(t, errors.Is(err, context.Canceled))
		assert.True(t, called)
	})

	t.Run("CancelContextAfterGetMessage", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithCancel(context.Background())
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()

		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Statistics:  &actions.RunnerScaleSetStatistic{},
		}
		// Cancel the context as soon as the message is handed out; the
		// message must still be fully handled afterwards.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).
			Return(msg, nil).
			Run(
				func(mock.Arguments) {
					cancel()
				},
			).
			Once()

		// Ensure delete message is called without cancel
		client.On("DeleteMessage", context.WithoutCancel(ctx), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
		config.Client = client

		// Two reconciliations: one for the initial message, one for msg.
		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Once()
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Once()

		l, err := New(config)
		require.Nil(t, err)

		err = l.Listen(ctx, handler)
		assert.ErrorIs(t, err, context.Canceled)
	})
}
// TestListener_acquireAvailableJobs covers job acquisition: a non-expiry
// failure surfaces immediately, a clean first attempt returns the acquired
// IDs, and the refresh-then-retry path on token expiry (succeeding and
// failing). The request IDs passed to AcquireJobs are the RunnerRequestIDs
// of the available jobs, in order.
func TestListener_acquireAvailableJobs(t *testing.T) {
	t.Parallel()

	t.Run("FailingToAcquireJobs", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		uuid := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}

		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}
		_, err = l.acquireAvailableJobs(ctx, availableJobs)
		assert.Error(t, err)
	})

	t.Run("SuccessfullyAcquiresJobsOnFirstRun", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		jobIDs := []int64{1, 2, 3}
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(jobIDs, nil).Once()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		uuid := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}

		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}
		acquiredJobIDs, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.NoError(t, err)
		assert.Equal(t, []int64{1, 2, 3}, acquiredJobIDs)
	})

	t.Run("RefreshAndSucceeds", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()

		want := []int64{1, 2, 3}
		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}

		// First call to AcquireJobs fails with a token expired error;
		// both calls must receive the same request IDs.
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				ids := args.Get(3).([]int64)
				assert.Equal(t, want, ids)
			}).
			Return(nil, &actions.MessageQueueTokenExpiredError{}).
			Once()

		// Second call (after the session refresh) succeeds.
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				ids := args.Get(3).([]int64)
				assert.Equal(t, want, ids)
			}).
			Return(want, nil).
			Once()

		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		got, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})

	t.Run("RefreshAndFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// Token expiry on both attempts: acquireAvailableJobs must give up.
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
		config.Client = client

		l, err := New(config)
		require.Nil(t, err)

		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}

		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}
		got, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.NotNil(t, err)
		assert.Nil(t, got)
	})
}
// TestListener_parseMessage exercises Listener.parseMessage: it must reject
// messages that carry no statistics or an unknown message type, and decode a
// batched JSON body into the per-event-type slices of the parsed message.
func TestListener_parseMessage(t *testing.T) {
	t.Run("FailOnEmptyStatistics", func(t *testing.T) {
		// Statistics are mandatory on every scale-set message; nil must be
		// treated as malformed input.
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Statistics:  nil,
		}
		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		assert.Error(t, err)
		assert.Nil(t, parsedMsg)
	})

	t.Run("FailOnIncorrectMessageType", func(t *testing.T) {
		// Only "RunnerScaleSetJobMessages" is understood; anything else is an error.
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerMessages", // arbitrary message type
			Statistics:  &actions.RunnerScaleSetStatistic{},
		}
		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		assert.Error(t, err)
		assert.Nil(t, parsedMsg)
	})

	t.Run("ParseAll", func(t *testing.T) {
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Body:        "", // filled in below with the JSON-encoded batch
			Statistics: &actions.RunnerScaleSetStatistic{
				TotalAvailableJobs:     1,
				TotalAcquiredJobs:      2,
				TotalAssignedJobs:      3,
				TotalRunningJobs:       4,
				TotalRegisteredRunners: 5,
				TotalBusyRunners:       6,
				TotalIdleRunners:       7,
			},
		}

		// Build the body the way the service does: one JSON array mixing
		// available/assigned/started/completed events, discriminated by
		// their embedded MessageType field.
		var batchedMessages []any
		jobsAvailable := []*actions.JobAvailable{
			{
				AcquireJobUrl: "https://github.com/example",
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAvailable,
					},
					RunnerRequestID: 1,
				},
			},
			{
				AcquireJobUrl: "https://github.com/example",
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAvailable,
					},
					RunnerRequestID: 2,
				},
			},
		}
		for _, msg := range jobsAvailable {
			batchedMessages = append(batchedMessages, msg)
		}
		jobsAssigned := []*actions.JobAssigned{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAssigned,
					},
					RunnerRequestID: 3,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAssigned,
					},
					RunnerRequestID: 4,
				},
			},
		}
		for _, msg := range jobsAssigned {
			batchedMessages = append(batchedMessages, msg)
		}
		jobsStarted := []*actions.JobStarted{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobStarted,
					},
					RunnerRequestID: 5,
				},
				RunnerID:   2,
				RunnerName: "runner2",
			},
		}
		for _, msg := range jobsStarted {
			batchedMessages = append(batchedMessages, msg)
		}
		jobsCompleted := []*actions.JobCompleted{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobCompleted,
					},
					RunnerRequestID: 6,
				},
				Result:     "success",
				RunnerId:   1,
				RunnerName: "runner1",
			},
		}
		for _, msg := range jobsCompleted {
			batchedMessages = append(batchedMessages, msg)
		}

		b, err := json.Marshal(batchedMessages)
		require.NoError(t, err)
		msg.Body = string(b)

		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		require.NoError(t, err)
		assert.Equal(t, msg.Statistics, parsedMsg.statistics)
		assert.Equal(t, jobsAvailable, parsedMsg.jobsAvailable)
		// NOTE(review): jobsAssigned events are marshalled into the body but
		// never asserted here — confirm whether parseMessage surfaces assigned
		// jobs (e.g. parsedMsg.jobsAssigned) and add an assertion if it does.
		assert.Equal(t, jobsStarted, parsedMsg.jobsStarted)
		assert.Equal(t, jobsCompleted, parsedMsg.jobsCompleted)
	})
}

View File

@@ -1,205 +0,0 @@
package listener
import (
"context"
"encoding/json"
"testing"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestInitialMetrics verifies the metrics published during listener start-up:
// the static min/max runner gauges at construction time, and the statistics
// carried by the initial message session once Listen begins.
func TestInitialMetrics(t *testing.T) {
	t.Parallel()

	t.Run("SetStaticMetrics", func(t *testing.T) {
		t.Parallel()

		metrics := metricsmocks.NewPublisher(t)

		minRunners := 5
		maxRunners := 10
		// New must publish the configured min/max exactly once.
		metrics.On("PublishStatic", minRunners, maxRunners).Once()

		config := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics,
			MinRunners: minRunners,
			MaxRunners: maxRunners,
		}

		l, err := New(config)
		assert.Nil(t, err)
		assert.NotNil(t, l)
	})

	t.Run("InitialMessageStatistics", func(t *testing.T) {
		t.Parallel()

		ctx, cancel := context.WithCancel(context.Background())

		sessionStatistics := &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs:     1,
			TotalAcquiredJobs:      2,
			TotalAssignedJobs:      3,
			TotalRunningJobs:       4,
			TotalRegisteredRunners: 5,
			TotalBusyRunners:       6,
			TotalIdleRunners:       7,
		}
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              sessionStatistics,
		}

		metrics := metricsmocks.NewPublisher(t)
		metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
		metrics.On("PublishStatistics", sessionStatistics).Once()
		// Cancel the context as soon as the desired-runner gauge is published,
		// so Listen returns instead of blocking on the message loop.
		metrics.On("PublishDesiredRunners", sessionStatistics.TotalAssignedJobs).
			Run(
				func(mock.Arguments) {
					cancel()
				},
			).Once()

		config := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics,
		}

		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
		config.Client = client

		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs, 0).
			Return(sessionStatistics.TotalAssignedJobs, nil).
			Once()

		l, err := New(config)
		assert.Nil(t, err)
		assert.NotNil(t, l)

		// FIX: assert.ErrorIs expects (t, err, target); the arguments were
		// previously swapped, which only passes when the returned error is
		// exactly context.Canceled rather than an error wrapping it.
		assert.ErrorIs(t, l.Listen(ctx, handler), context.Canceled)
	})
}
// TestHandleMessageMetrics verifies that handleMessage publishes exactly one
// metric event per observable fact in a batched message — the session
// statistics, each started job, each completed job, and the resulting desired
// runner count — and that the message is deleted from the queue afterwards.
func TestHandleMessageMetrics(t *testing.T) {
	t.Parallel()

	// Message carrying statistics; Body is filled in below with the
	// JSON-encoded batch of one started and two completed job events.
	msg := &actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Body:        "",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs:     1,
			TotalAcquiredJobs:      2,
			TotalAssignedJobs:      3,
			TotalRunningJobs:       4,
			TotalRegisteredRunners: 5,
			TotalBusyRunners:       6,
			TotalIdleRunners:       7,
		},
	}

	var batchedMessages []any
	jobsStarted := []*actions.JobStarted{
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobStarted,
				},
				RunnerRequestID: 8,
			},
			RunnerID:   3,
			RunnerName: "runner3",
		},
	}
	for _, msg := range jobsStarted {
		batchedMessages = append(batchedMessages, msg)
	}
	jobsCompleted := []*actions.JobCompleted{
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobCompleted,
				},
				RunnerRequestID: 6,
			},
			Result:     "success",
			RunnerId:   1,
			RunnerName: "runner1",
		},
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobCompleted,
				},
				RunnerRequestID: 7,
			},
			Result:     "success",
			RunnerId:   2,
			RunnerName: "runner2",
		},
	}
	for _, msg := range jobsCompleted {
		batchedMessages = append(batchedMessages, msg)
	}

	b, err := json.Marshal(batchedMessages)
	require.NoError(t, err)
	msg.Body = string(b)

	// The handler reports 4 desired runners; that exact value must be
	// forwarded to the metrics publisher.
	desiredResult := 4

	metrics := metricsmocks.NewPublisher(t)
	metrics.On("PublishStatic", 0, 0).Once()
	metrics.On("PublishStatistics", msg.Statistics).Once()
	metrics.On("PublishJobCompleted", jobsCompleted[0]).Once()
	metrics.On("PublishJobCompleted", jobsCompleted[1]).Once()
	metrics.On("PublishJobStarted", jobsStarted[0]).Once()
	metrics.On("PublishDesiredRunners", desiredResult).Once()

	handler := listenermocks.NewHandler(t)
	handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
	// 2 == len(jobsCompleted): the completed-job count is passed through.
	handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 2).Return(desiredResult, nil).Once()

	// handleMessage must acknowledge the message by deleting it.
	client := listenermocks.NewClient(t)
	client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()

	config := Config{
		Client:     listenermocks.NewClient(t),
		ScaleSetID: 1,
		Metrics:    metrics,
	}

	l, err := New(config)
	require.NoError(t, err)
	// Swap in the mock client and a minimal session after construction so
	// only DeleteMessage is expected on the client during handleMessage.
	l.client = client
	l.session = &actions.RunnerScaleSetSession{
		OwnerName:               "",
		RunnerScaleSet:          &actions.RunnerScaleSet{},
		MessageQueueUrl:         "",
		MessageQueueAccessToken: "",
		Statistics:              &actions.RunnerScaleSetStatistic{},
	}

	err = l.handleMessage(context.Background(), handler, msg)
	require.NoError(t, err)
}

View File

@@ -1,190 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
uuid "github.com/google/uuid"
)
// Client is an autogenerated mock type for the Client type
type Client struct {
mock.Mock
}
// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
var r0 []int64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) ([]int64, error)); ok {
return rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok {
r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok {
r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, owner)
var r0 *actions.RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string) (*actions.RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, owner)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string) *actions.RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, owner)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
r1 = rf(ctx, runnerScaleSetId, owner)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
ret := _m.Called(ctx, runnerScaleSetId)
var r0 *actions.AcquirableJobList
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int) (*actions.AcquirableJobList, error)); ok {
return rf(ctx, runnerScaleSetId)
}
if rf, ok := ret.Get(0).(func(context.Context, int) *actions.AcquirableJobList); ok {
r0 = rf(ctx, runnerScaleSetId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.AcquirableJobList)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
r1 = rf(ctx, runnerScaleSetId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
var r0 *actions.RunnerScaleSetMessage
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) (*actions.RunnerScaleSetMessage, error)); ok {
return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
}
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) *actions.RunnerScaleSetMessage); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetMessage)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string, string, int64, int) error); ok {
r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 *actions.RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) (*actions.RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, sessionId)
}
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *actions.RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok {
r1 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewClient(t interface {
mock.TestingT
Cleanup(func())
}) *Client {
mock := &Client{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,68 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// Handler is an autogenerated mock type for the Handler type
type Handler struct {
mock.Mock
}
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
ret := _m.Called(ctx, count, jobsCompleted)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
return rf(ctx, count, jobsCompleted)
}
if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
r0 = rf(ctx, count, jobsCompleted)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
r1 = rf(ctx, count, jobsCompleted)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ret := _m.Called(ctx, jobInfo)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
r0 = rf(ctx, jobInfo)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewHandler(t interface {
mock.TestingT
Cleanup(func())
}) *Handler {
mock := &Handler{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -8,8 +8,13 @@ import (
"os/signal"
"syscall"
"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/cmd/ghalistener/scaler"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/scaleset/listener"
"github.com/google/uuid"
"golang.org/x/sync/errgroup"
)
func main() {
@@ -28,14 +33,116 @@ func main() {
os.Exit(1)
}
app, err := app.New(*config)
if err != nil {
log.Printf("Failed to initialize app: %v", err)
os.Exit(1)
}
if err := app.Run(ctx); err != nil {
if err := run(ctx, config); err != nil {
log.Printf("Application returned an error: %v", err)
os.Exit(1)
}
}
// run wires together the actions service client, the scale-set listener, the
// Kubernetes scaler, and (optionally) the Prometheus metrics exporter, then
// blocks until the listener exits or ctx is cancelled.
//
// It returns an error when any component fails to initialize, or the first
// error produced by the listener / metrics-server goroutines.
func run(ctx context.Context, config *config.Config) error {
	// Derive enterprise/org/repo labels for metrics from the configured URL.
	ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
	if err != nil {
		return fmt.Errorf("failed to parse GitHub config from URL: %w", err)
	}

	logger, err := config.Logger()
	if err != nil {
		return fmt.Errorf("failed to create logger: %w", err)
	}

	// Metrics are opt-in: only build the exporter when an address is configured.
	var metricsExporter metrics.ServerExporter
	if config.MetricsAddr != "" {
		metricsExporter = metrics.NewExporter(metrics.ExporterConfig{
			ScaleSetName:      config.EphemeralRunnerSetName,
			ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
			Enterprise:        ghConfig.Enterprise,
			Organization:      ghConfig.Organization,
			Repository:        ghConfig.Repository,
			ServerAddr:        config.MetricsAddr,
			ServerEndpoint:    config.MetricsEndpoint,
			Metrics:           config.Metrics,
			Logger:            logger.With("component", "metrics exporter"),
		})
	}

	// The hostname identifies this listener as the message-session owner;
	// fall back to a random UUID when it cannot be determined.
	hostname, err := os.Hostname()
	if err != nil {
		hostname = uuid.NewString()
		logger.Info("Failed to get hostname, fallback to uuid", "uuid", hostname, "error", err)
	}

	scalesetClient, err := config.ActionsClient(logger)
	if err != nil {
		return fmt.Errorf("failed to create actions client: %w", err)
	}

	sessionClient, err := scalesetClient.MessageSessionClient(
		ctx,
		config.RunnerScaleSetID,
		hostname,
	)
	if err != nil {
		return fmt.Errorf("failed to create actions message session client: %w", err)
	}
	// Close with a fresh context: ctx is typically already cancelled by the
	// time this deferred cleanup runs.
	defer func() {
		if err := sessionClient.Close(context.Background()); err != nil {
			logger.Error("Failed to close session client", "error", err)
		}
	}()

	var listenerOptions []listener.Option
	if metricsExporter != nil {
		listenerOptions = append(
			listenerOptions,
			listener.WithMetricsRecorder(
				metricsExporter,
			),
		)
		// Min/max runners never change at runtime, so record them once up front.
		metricsExporter.RecordStatic(config.MinRunners, config.MaxRunners)
	}

	// Named ghaListener (not "listener") to avoid shadowing the package name.
	ghaListener, err := listener.New(
		sessionClient,
		listener.Config{
			ScaleSetID: config.RunnerScaleSetID,
			MaxRunners: config.MaxRunners,
			Logger:     logger.With("component", "listener"),
		},
		listenerOptions...,
	)
	if err != nil {
		return fmt.Errorf("failed to create new listener: %w", err)
	}

	// Named kubeScaler (not "scaler") to avoid shadowing the package name.
	kubeScaler, err := scaler.New(
		scaler.Config{
			EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
			EphemeralRunnerSetName:      config.EphemeralRunnerSetName,
			MaxRunners:                  config.MaxRunners,
			MinRunners:                  config.MinRunners,
		},
		scaler.WithLogger(logger.With("component", "worker")),
	)
	if err != nil {
		return fmt.Errorf("failed to create new kubernetes worker: %w", err)
	}

	g, ctx := errgroup.WithContext(ctx)
	// The metrics server has no natural stop condition, so give it a context
	// that is cancelled (with a cause) as soon as the listener exits.
	metricsCtx, cancelMetrics := context.WithCancelCause(ctx)

	g.Go(func() error {
		logger.Info("Starting listener")
		listenerErr := ghaListener.Run(ctx, kubeScaler) // FIX: was misspelled "listnerErr"
		cancelMetrics(fmt.Errorf("listener exited: %w", listenerErr))
		return listenerErr
	})

	if metricsExporter != nil {
		g.Go(func() error {
			logger.Info("Starting metrics server")
			return metricsExporter.ListenAndServe(metricsCtx)
		})
	}

	return g.Wait()
}

View File

@@ -2,14 +2,13 @@ package metrics
import (
"context"
"errors"
"log/slog"
"net/http"
"strings"
"time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/actions/scaleset"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
@@ -76,7 +75,7 @@ var metricsHelp = metricsHelpRegistry{
},
}
func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
func (e *exporter) jobLabels(jobBase *scaleset.JobMessageBase) prometheus.Labels {
workflowRefInfo := ParseWorkflowRef(jobBase.JobWorkflowRef)
return prometheus.Labels{
labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
@@ -90,40 +89,38 @@ func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels
}
}
func (e *exporter) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
func (e *exporter) completedJobLabels(msg *scaleset.JobCompleted) prometheus.Labels {
l := e.jobLabels(&msg.JobMessageBase)
l[labelKeyJobResult] = msg.Result
return l
}
func (e *exporter) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
func (e *exporter) startedJobLabels(msg *scaleset.JobStarted) prometheus.Labels {
return e.jobLabels(&msg.JobMessageBase)
}
//go:generate mockery --name Publisher --output ./mocks --outpkg mocks --case underscore
type Publisher interface {
PublishStatic(min, max int)
PublishStatistics(stats *actions.RunnerScaleSetStatistic)
PublishJobStarted(msg *actions.JobStarted)
PublishJobCompleted(msg *actions.JobCompleted)
PublishDesiredRunners(count int)
type Recorder interface {
RecordStatic(min, max int)
RecordStatistics(stats *scaleset.RunnerScaleSetStatistic)
RecordJobStarted(msg *scaleset.JobStarted)
RecordJobCompleted(msg *scaleset.JobCompleted)
RecordDesiredRunners(count int)
}
//go:generate mockery --name ServerPublisher --output ./mocks --outpkg mocks --case underscore
type ServerExporter interface {
Publisher
Recorder
ListenAndServe(ctx context.Context) error
}
var (
_ Publisher = &discard{}
_ Recorder = &discard{}
_ ServerExporter = &exporter{}
)
var Discard Publisher = &discard{}
var Discard Recorder = &discard{}
type exporter struct {
logger logr.Logger
logger *slog.Logger
scaleSetLabels prometheus.Labels
*metrics
srv *http.Server
@@ -158,7 +155,7 @@ type ExporterConfig struct {
Repository string
ServerAddr string
ServerEndpoint string
Logger logr.Logger
Logger *slog.Logger
Metrics *v1alpha1.MetricsConfig
}
@@ -309,7 +306,7 @@ func NewExporter(config ExporterConfig) ServerExporter {
)
return &exporter{
logger: config.Logger.WithName("metrics"),
logger: config.Logger.With("component", "metrics exporter"),
scaleSetLabels: prometheus.Labels{
labelKeyRunnerScaleSetName: config.ScaleSetName,
labelKeyRunnerScaleSetNamespace: config.ScaleSetNamespace,
@@ -325,9 +322,7 @@ func NewExporter(config ExporterConfig) ServerExporter {
}
}
var errUnknownMetricName = errors.New("unknown metric name")
func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, logger logr.Logger) *metrics {
func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, logger *slog.Logger) *metrics {
logger.Info(
"Registering metrics",
"gauges",
@@ -345,7 +340,11 @@ func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, log
for name, cfg := range config.Gauges {
help, ok := metricsHelp.gauges[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "gauge")
logger.Error(
"unknown metric name",
slog.String("name", name),
slog.String("kind", "gauge"),
)
continue
}
@@ -367,7 +366,11 @@ func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, log
for name, cfg := range config.Counters {
help, ok := metricsHelp.counters[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "counter")
logger.Error(
"unknown metric name",
slog.String("name", name),
slog.String("kind", "counter"),
)
continue
}
c := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
@@ -388,7 +391,11 @@ func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, log
for name, cfg := range config.Histograms {
help, ok := metricsHelp.histograms[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "histogram")
logger.Error(
"unknown metric name",
slog.String("name", name),
slog.String("kind", "histogram"),
)
continue
}
@@ -464,12 +471,12 @@ func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, va
m.histogram.With(labels).Observe(val)
}
func (e *exporter) PublishStatic(min, max int) {
func (e *exporter) RecordStatic(min, max int) {
e.setGauge(MetricMaxRunners, e.scaleSetLabels, float64(max))
e.setGauge(MetricMinRunners, e.scaleSetLabels, float64(min))
}
func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
func (e *exporter) RecordStatistics(stats *scaleset.RunnerScaleSetStatistic) {
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
@@ -477,7 +484,7 @@ func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
}
func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
func (e *exporter) RecordJobStarted(msg *scaleset.JobStarted) {
l := e.startedJobLabels(msg)
e.incCounter(MetricStartedJobsTotal, l)
@@ -485,7 +492,7 @@ func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
}
func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
func (e *exporter) RecordJobCompleted(msg *scaleset.JobCompleted) {
l := e.completedJobLabels(msg)
e.incCounter(MetricCompletedJobsTotal, l)
@@ -493,17 +500,17 @@ func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
}
func (e *exporter) PublishDesiredRunners(count int) {
func (e *exporter) RecordDesiredRunners(count int) {
e.setGauge(MetricDesiredRunners, e.scaleSetLabels, float64(count))
}
type discard struct{}
func (*discard) PublishStatic(int, int) {}
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic) {}
func (*discard) PublishJobStarted(*actions.JobStarted) {}
func (*discard) PublishJobCompleted(*actions.JobCompleted) {}
func (*discard) PublishDesiredRunners(int) {}
func (*discard) RecordStatic(int, int) {}
func (*discard) RecordStatistics(*scaleset.RunnerScaleSetStatistic) {}
func (*discard) RecordJobStarted(*scaleset.JobStarted) {}
func (*discard) RecordJobCompleted(*scaleset.JobCompleted) {}
func (*discard) RecordDesiredRunners(int) {}
var defaultRuntimeBuckets []float64 = []float64{
0.01,

View File

@@ -3,7 +3,7 @@ package metrics
import (
"testing"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/scaleset"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
@@ -22,13 +22,13 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
tests := []struct {
name string
jobBase actions.JobMessageBase
jobBase scaleset.JobMessageBase
wantName string
wantTarget string
}{
{
name: "main branch workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Build and Test",
@@ -40,7 +40,7 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
},
{
name: "feature branch workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "myorg",
RepositoryName: "myrepo",
JobDisplayName: "CI/CD Pipeline",
@@ -52,7 +52,7 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
},
{
name: "pull request workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "PR Checks",
@@ -64,7 +64,7 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
},
{
name: "tag workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Release",

View File

@@ -1,15 +1,17 @@
package metrics
import (
"log/slog"
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var discardLogger = slog.New(slog.DiscardHandler)
func TestInstallMetrics(t *testing.T) {
metricsConfig := v1alpha1.MetricsConfig{
Counters: map[string]*v1alpha1.CounterMetric{
@@ -74,7 +76,7 @@ func TestInstallMetrics(t *testing.T) {
}
reg := prometheus.NewRegistry()
got := installMetrics(metricsConfig, reg, logr.Discard())
got := installMetrics(metricsConfig, reg, discardLogger)
assert.Len(t, got.counters, 1)
assert.Len(t, got.gauges, 1)
assert.Len(t, got.histograms, 2)
@@ -98,7 +100,7 @@ func TestNewExporter(t *testing.T) {
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
@@ -140,7 +142,7 @@ func TestNewExporter(t *testing.T) {
Repository: "repo",
ServerAddr: "", // empty ServerAddr should default to ":8080"
ServerEndpoint: "",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
@@ -201,7 +203,7 @@ func TestNewExporter(t *testing.T) {
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: &metricsConfig,
}
@@ -244,7 +246,7 @@ func TestExporterConfigDefaults(t *testing.T) {
Repository: "repo",
ServerAddr: "",
ServerEndpoint: "",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
@@ -257,7 +259,7 @@ func TestExporterConfigDefaults(t *testing.T) {
Repository: "repo",
ServerAddr: ":8080", // default server address
ServerEndpoint: "/metrics", // default server endpoint
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: &defaultMetrics, // when metrics is nil, all default metrics should be registered
}

View File

@@ -1,53 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// Publisher is an autogenerated mock type for the Publisher type
type Publisher struct {
mock.Mock
}
// PublishDesiredRunners provides a mock function with given fields: count
func (_m *Publisher) PublishDesiredRunners(count int) {
_m.Called(count)
}
// PublishJobCompleted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobCompleted(msg *actions.JobCompleted) {
_m.Called(msg)
}
// PublishJobStarted provides a mock function with given fields: msg
func (_m *Publisher) PublishJobStarted(msg *actions.JobStarted) {
_m.Called(msg)
}
// PublishStatic provides a mock function with given fields: min, max
func (_m *Publisher) PublishStatic(min int, max int) {
_m.Called(min, max)
}
// PublishStatistics provides a mock function with given fields: stats
func (_m *Publisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
_m.Called(stats)
}
// NewPublisher creates a new instance of Publisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewPublisher(t interface {
mock.TestingT
Cleanup(func())
}) *Publisher {
mock := &Publisher{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,69 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// ServerPublisher is an autogenerated mock type for the ServerPublisher type
type ServerPublisher struct {
mock.Mock
}
// ListenAndServe provides a mock function with given fields: ctx
func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
ret := _m.Called(ctx)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// PublishDesiredRunners provides a mock function with given fields: count
func (_m *ServerPublisher) PublishDesiredRunners(count int) {
_m.Called(count)
}
// PublishJobCompleted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobCompleted(msg *actions.JobCompleted) {
_m.Called(msg)
}
// PublishJobStarted provides a mock function with given fields: msg
func (_m *ServerPublisher) PublishJobStarted(msg *actions.JobStarted) {
_m.Called(msg)
}
// PublishStatic provides a mock function with given fields: min, max
func (_m *ServerPublisher) PublishStatic(min int, max int) {
_m.Called(min, max)
}
// PublishStatistics provides a mock function with given fields: stats.
// The mocked method has no return value; the call is only recorded.
func (_m *ServerPublisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
	_m.Called(stats)
}
// NewServerPublisher constructs a ServerPublisher mock bound to the given test
// context. The mock's expectations are asserted automatically when the test
// finishes, via the registered Cleanup callback. The argument is typically *testing.T.
func NewServerPublisher(t interface {
	mock.TestingT
	Cleanup(func())
}) *ServerPublisher {
	p := &ServerPublisher{}
	p.Mock.Test(t)
	t.Cleanup(func() { p.AssertExpectations(t) })
	return p
}

View File

@@ -0,0 +1,529 @@
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package metrics
import (
"context"
"github.com/actions/scaleset"
mock "github.com/stretchr/testify/mock"
)
// NewMockRecorder creates a new instance of MockRecorder. It also registers a
// testing interface on the mock and a cleanup function to assert the mock's
// expectations. The first argument is typically a *testing.T value.
func NewMockRecorder(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockRecorder {
	// Named "m" rather than "mock" so the local variable does not shadow the
	// imported testify "mock" package.
	m := &MockRecorder{}
	m.Mock.Test(t)
	t.Cleanup(func() { m.AssertExpectations(t) })
	return m
}
// MockRecorder is an autogenerated mock type for the Recorder type.
// It embeds mock.Mock, so expectations are registered with On(...) or via
// EXPECT() and verified with AssertExpectations.
type MockRecorder struct {
	mock.Mock
}
// MockRecorder_Expecter provides type-safe helpers for registering
// expectations on a MockRecorder.
type MockRecorder_Expecter struct {
	mock *mock.Mock
}
// EXPECT returns an expecter that allows registering typed expectations
// on this mock instead of using string-based mock.On calls.
func (_m *MockRecorder) EXPECT() *MockRecorder_Expecter {
	return &MockRecorder_Expecter{mock: &_m.Mock}
}
// RecordDesiredRunners provides a mock function for the type MockRecorder.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockRecorder) RecordDesiredRunners(count int) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(count)
}

// MockRecorder_RecordDesiredRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordDesiredRunners'
type MockRecorder_RecordDesiredRunners_Call struct {
	*mock.Call
}

// RecordDesiredRunners is a helper method to define mock.On call
//   - count int
func (_e *MockRecorder_Expecter) RecordDesiredRunners(count interface{}) *MockRecorder_RecordDesiredRunners_Call {
	return &MockRecorder_RecordDesiredRunners_Call{Call: _e.mock.On("RecordDesiredRunners", count)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockRecorder_RecordDesiredRunners_Call) Run(run func(count int)) *MockRecorder_RecordDesiredRunners_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockRecorder_RecordDesiredRunners_Call) Return() *MockRecorder_RecordDesiredRunners_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockRecorder_RecordDesiredRunners_Call) RunAndReturn(run func(count int)) *MockRecorder_RecordDesiredRunners_Call {
	_c.Run(run)
	return _c
}
// RecordJobCompleted provides a mock function for the type MockRecorder.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockRecorder) RecordJobCompleted(msg *scaleset.JobCompleted) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(msg)
}

// MockRecorder_RecordJobCompleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobCompleted'
type MockRecorder_RecordJobCompleted_Call struct {
	*mock.Call
}

// RecordJobCompleted is a helper method to define mock.On call
//   - msg *scaleset.JobCompleted
func (_e *MockRecorder_Expecter) RecordJobCompleted(msg interface{}) *MockRecorder_RecordJobCompleted_Call {
	return &MockRecorder_RecordJobCompleted_Call{Call: _e.mock.On("RecordJobCompleted", msg)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockRecorder_RecordJobCompleted_Call) Run(run func(msg *scaleset.JobCompleted)) *MockRecorder_RecordJobCompleted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobCompleted
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobCompleted)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockRecorder_RecordJobCompleted_Call) Return() *MockRecorder_RecordJobCompleted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockRecorder_RecordJobCompleted_Call) RunAndReturn(run func(msg *scaleset.JobCompleted)) *MockRecorder_RecordJobCompleted_Call {
	_c.Run(run)
	return _c
}
// RecordJobStarted provides a mock function for the type MockRecorder.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockRecorder) RecordJobStarted(msg *scaleset.JobStarted) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(msg)
}

// MockRecorder_RecordJobStarted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobStarted'
type MockRecorder_RecordJobStarted_Call struct {
	*mock.Call
}

// RecordJobStarted is a helper method to define mock.On call
//   - msg *scaleset.JobStarted
func (_e *MockRecorder_Expecter) RecordJobStarted(msg interface{}) *MockRecorder_RecordJobStarted_Call {
	return &MockRecorder_RecordJobStarted_Call{Call: _e.mock.On("RecordJobStarted", msg)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockRecorder_RecordJobStarted_Call) Run(run func(msg *scaleset.JobStarted)) *MockRecorder_RecordJobStarted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobStarted
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobStarted)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockRecorder_RecordJobStarted_Call) Return() *MockRecorder_RecordJobStarted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockRecorder_RecordJobStarted_Call) RunAndReturn(run func(msg *scaleset.JobStarted)) *MockRecorder_RecordJobStarted_Call {
	_c.Run(run)
	return _c
}
// RecordStatic provides a mock function for the type MockRecorder.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockRecorder) RecordStatic(min int, max int) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(min, max)
}

// MockRecorder_RecordStatic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatic'
type MockRecorder_RecordStatic_Call struct {
	*mock.Call
}

// RecordStatic is a helper method to define mock.On call
//   - min int
//   - max int
func (_e *MockRecorder_Expecter) RecordStatic(min interface{}, max interface{}) *MockRecorder_RecordStatic_Call {
	return &MockRecorder_RecordStatic_Call{Call: _e.mock.On("RecordStatic", min, max)}
}

// Run installs a typed callback invoked with the call's arguments.
func (_c *MockRecorder_RecordStatic_Call) Run(run func(min int, max int)) *MockRecorder_RecordStatic_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Nil recorded arguments keep the zero value instead of panicking
		// on the type assertions.
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		var arg1 int
		if args[1] != nil {
			arg1 = args[1].(int)
		}
		run(arg0, arg1)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockRecorder_RecordStatic_Call) Return() *MockRecorder_RecordStatic_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockRecorder_RecordStatic_Call) RunAndReturn(run func(min int, max int)) *MockRecorder_RecordStatic_Call {
	_c.Run(run)
	return _c
}
// RecordStatistics provides a mock function for the type MockRecorder.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockRecorder) RecordStatistics(stats *scaleset.RunnerScaleSetStatistic) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(stats)
}

// MockRecorder_RecordStatistics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatistics'
type MockRecorder_RecordStatistics_Call struct {
	*mock.Call
}

// RecordStatistics is a helper method to define mock.On call
//   - stats *scaleset.RunnerScaleSetStatistic
func (_e *MockRecorder_Expecter) RecordStatistics(stats interface{}) *MockRecorder_RecordStatistics_Call {
	return &MockRecorder_RecordStatistics_Call{Call: _e.mock.On("RecordStatistics", stats)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockRecorder_RecordStatistics_Call) Run(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockRecorder_RecordStatistics_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.RunnerScaleSetStatistic
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(*scaleset.RunnerScaleSetStatistic)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockRecorder_RecordStatistics_Call) Return() *MockRecorder_RecordStatistics_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockRecorder_RecordStatistics_Call) RunAndReturn(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockRecorder_RecordStatistics_Call {
	_c.Run(run)
	return _c
}
// NewMockServerExporter creates a new instance of MockServerExporter. It also
// registers a testing interface on the mock and a cleanup function to assert
// the mock's expectations. The first argument is typically a *testing.T value.
func NewMockServerExporter(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockServerExporter {
	// Named "m" rather than "mock" so the local variable does not shadow the
	// imported testify "mock" package.
	m := &MockServerExporter{}
	m.Mock.Test(t)
	t.Cleanup(func() { m.AssertExpectations(t) })
	return m
}
// MockServerExporter is an autogenerated mock type for the ServerExporter type.
// It embeds mock.Mock, so expectations are registered with On(...) or via
// EXPECT() and verified with AssertExpectations.
type MockServerExporter struct {
	mock.Mock
}
// MockServerExporter_Expecter provides type-safe helpers for registering
// expectations on a MockServerExporter.
type MockServerExporter_Expecter struct {
	mock *mock.Mock
}
// EXPECT returns an expecter that allows registering typed expectations
// on this mock instead of using string-based mock.On calls.
func (_m *MockServerExporter) EXPECT() *MockServerExporter_Expecter {
	return &MockServerExporter_Expecter{mock: &_m.Mock}
}
// ListenAndServe provides a mock function for the type MockServerExporter.
// It panics if no return value was configured; if the expectation was set up
// with a function value, that function is invoked with ctx to produce the
// result, otherwise the stored error is returned.
func (_mock *MockServerExporter) ListenAndServe(ctx context.Context) error {
	results := _mock.Called(ctx)
	if len(results) == 0 {
		panic("no return value specified for ListenAndServe")
	}
	if fn, ok := results.Get(0).(func(context.Context) error); ok {
		return fn(ctx)
	}
	return results.Error(0)
}
// MockServerExporter_ListenAndServe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListenAndServe'
type MockServerExporter_ListenAndServe_Call struct {
	*mock.Call
}

// ListenAndServe is a helper method to define mock.On call
//   - ctx context.Context
func (_e *MockServerExporter_Expecter) ListenAndServe(ctx interface{}) *MockServerExporter_ListenAndServe_Call {
	return &MockServerExporter_ListenAndServe_Call{Call: _e.mock.On("ListenAndServe", ctx)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockServerExporter_ListenAndServe_Call) Run(run func(ctx context.Context)) *MockServerExporter_ListenAndServe_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 context.Context
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(context.Context)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return configures the error the mocked ListenAndServe will return.
func (_c *MockServerExporter_ListenAndServe_Call) Return(err error) *MockServerExporter_ListenAndServe_Call {
	_c.Call.Return(err)
	return _c
}

// RunAndReturn stores the function as the return value; the mock detects the
// function type and calls it with the method's arguments to produce the result.
func (_c *MockServerExporter_ListenAndServe_Call) RunAndReturn(run func(ctx context.Context) error) *MockServerExporter_ListenAndServe_Call {
	_c.Call.Return(run)
	return _c
}
// RecordDesiredRunners provides a mock function for the type MockServerExporter.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockServerExporter) RecordDesiredRunners(count int) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(count)
}

// MockServerExporter_RecordDesiredRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordDesiredRunners'
type MockServerExporter_RecordDesiredRunners_Call struct {
	*mock.Call
}

// RecordDesiredRunners is a helper method to define mock.On call
//   - count int
func (_e *MockServerExporter_Expecter) RecordDesiredRunners(count interface{}) *MockServerExporter_RecordDesiredRunners_Call {
	return &MockServerExporter_RecordDesiredRunners_Call{Call: _e.mock.On("RecordDesiredRunners", count)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockServerExporter_RecordDesiredRunners_Call) Run(run func(count int)) *MockServerExporter_RecordDesiredRunners_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockServerExporter_RecordDesiredRunners_Call) Return() *MockServerExporter_RecordDesiredRunners_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockServerExporter_RecordDesiredRunners_Call) RunAndReturn(run func(count int)) *MockServerExporter_RecordDesiredRunners_Call {
	_c.Run(run)
	return _c
}
// RecordJobCompleted provides a mock function for the type MockServerExporter.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockServerExporter) RecordJobCompleted(msg *scaleset.JobCompleted) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(msg)
}

// MockServerExporter_RecordJobCompleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobCompleted'
type MockServerExporter_RecordJobCompleted_Call struct {
	*mock.Call
}

// RecordJobCompleted is a helper method to define mock.On call
//   - msg *scaleset.JobCompleted
func (_e *MockServerExporter_Expecter) RecordJobCompleted(msg interface{}) *MockServerExporter_RecordJobCompleted_Call {
	return &MockServerExporter_RecordJobCompleted_Call{Call: _e.mock.On("RecordJobCompleted", msg)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockServerExporter_RecordJobCompleted_Call) Run(run func(msg *scaleset.JobCompleted)) *MockServerExporter_RecordJobCompleted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobCompleted
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobCompleted)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockServerExporter_RecordJobCompleted_Call) Return() *MockServerExporter_RecordJobCompleted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockServerExporter_RecordJobCompleted_Call) RunAndReturn(run func(msg *scaleset.JobCompleted)) *MockServerExporter_RecordJobCompleted_Call {
	_c.Run(run)
	return _c
}
// RecordJobStarted provides a mock function for the type MockServerExporter.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockServerExporter) RecordJobStarted(msg *scaleset.JobStarted) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(msg)
}

// MockServerExporter_RecordJobStarted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobStarted'
type MockServerExporter_RecordJobStarted_Call struct {
	*mock.Call
}

// RecordJobStarted is a helper method to define mock.On call
//   - msg *scaleset.JobStarted
func (_e *MockServerExporter_Expecter) RecordJobStarted(msg interface{}) *MockServerExporter_RecordJobStarted_Call {
	return &MockServerExporter_RecordJobStarted_Call{Call: _e.mock.On("RecordJobStarted", msg)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockServerExporter_RecordJobStarted_Call) Run(run func(msg *scaleset.JobStarted)) *MockServerExporter_RecordJobStarted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobStarted
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobStarted)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockServerExporter_RecordJobStarted_Call) Return() *MockServerExporter_RecordJobStarted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockServerExporter_RecordJobStarted_Call) RunAndReturn(run func(msg *scaleset.JobStarted)) *MockServerExporter_RecordJobStarted_Call {
	_c.Run(run)
	return _c
}
// RecordStatic provides a mock function for the type MockServerExporter.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockServerExporter) RecordStatic(min int, max int) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(min, max)
}

// MockServerExporter_RecordStatic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatic'
type MockServerExporter_RecordStatic_Call struct {
	*mock.Call
}

// RecordStatic is a helper method to define mock.On call
//   - min int
//   - max int
func (_e *MockServerExporter_Expecter) RecordStatic(min interface{}, max interface{}) *MockServerExporter_RecordStatic_Call {
	return &MockServerExporter_RecordStatic_Call{Call: _e.mock.On("RecordStatic", min, max)}
}

// Run installs a typed callback invoked with the call's arguments.
func (_c *MockServerExporter_RecordStatic_Call) Run(run func(min int, max int)) *MockServerExporter_RecordStatic_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Nil recorded arguments keep the zero value instead of panicking
		// on the type assertions.
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		var arg1 int
		if args[1] != nil {
			arg1 = args[1].(int)
		}
		run(arg0, arg1)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockServerExporter_RecordStatic_Call) Return() *MockServerExporter_RecordStatic_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockServerExporter_RecordStatic_Call) RunAndReturn(run func(min int, max int)) *MockServerExporter_RecordStatic_Call {
	_c.Run(run)
	return _c
}
// RecordStatistics provides a mock function for the type MockServerExporter.
// The mocked method has no return value; the call is only recorded.
func (_mock *MockServerExporter) RecordStatistics(stats *scaleset.RunnerScaleSetStatistic) {
	// Redundant trailing "return" removed: recording the call is all this does.
	_mock.Called(stats)
}

// MockServerExporter_RecordStatistics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatistics'
type MockServerExporter_RecordStatistics_Call struct {
	*mock.Call
}

// RecordStatistics is a helper method to define mock.On call
//   - stats *scaleset.RunnerScaleSetStatistic
func (_e *MockServerExporter_Expecter) RecordStatistics(stats interface{}) *MockServerExporter_RecordStatistics_Call {
	return &MockServerExporter_RecordStatistics_Call{Call: _e.mock.On("RecordStatistics", stats)}
}

// Run installs a typed callback invoked with the call's argument.
func (_c *MockServerExporter_RecordStatistics_Call) Run(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockServerExporter_RecordStatistics_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.RunnerScaleSetStatistic
		// A nil recorded argument keeps the zero value instead of panicking
		// on the type assertion.
		if args[0] != nil {
			arg0 = args[0].(*scaleset.RunnerScaleSetStatistic)
		}
		run(arg0)
	})
	return _c
}

// Return finalizes the expectation; the mocked method returns nothing.
func (_c *MockServerExporter_RecordStatistics_Call) Return() *MockServerExporter_RecordStatistics_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn is an alias for Run, since the method has no return values.
func (_c *MockServerExporter_RecordStatistics_Call) RunAndReturn(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockServerExporter_RecordStatistics_Call {
	_c.Run(run)
	return _c
}

View File

@@ -1,30 +1,27 @@
package worker
package scaler
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"math"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/actions/scaleset"
"github.com/actions/scaleset/listener"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const workerName = "kubernetesworker"
type Option func(*Scaler)
type Option func(*Worker)
func WithLogger(logger logr.Logger) Option {
return func(w *Worker) {
logger = logger.WithName(workerName)
w.logger = &logger
func WithLogger(logger *slog.Logger) Option {
return func(w *Scaler) {
w.logger = logger
}
}
@@ -35,23 +32,25 @@ type Config struct {
MinRunners int
}
// The Worker's role is to process the messages it receives from the listener.
// The Scaler's role is to process the messages it receives from the listener.
// It then initiates Kubernetes API requests to carry out the necessary actions.
type Worker struct {
clientset *kubernetes.Clientset
config Config
lastPatch int
patchSeq int
logger *logr.Logger
type Scaler struct {
clientset *kubernetes.Clientset
config Config
targetRunners int
patchSeq int
// dirty is set when there are any events handled before the desired count is called.
dirty bool
logger *slog.Logger
}
var _ listener.Handler = (*Worker)(nil)
var _ listener.Scaler = (*Scaler)(nil)
func New(config Config, options ...Option) (*Worker, error) {
w := &Worker{
config: config,
lastPatch: -1,
patchSeq: -1,
func New(config Config, options ...Option) (*Scaler, error) {
w := &Scaler{
config: config,
targetRunners: -1,
patchSeq: -1,
}
conf, err := rest.InClusterConfig()
@@ -77,14 +76,9 @@ func New(config Config, options ...Option) (*Worker, error) {
return w, nil
}
func (w *Worker) applyDefaults() error {
func (w *Scaler) applyDefaults() error {
if w.logger == nil {
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatJSON)
if err != nil {
return fmt.Errorf("NewLogger failed: %w", err)
}
logger = logger.WithName(workerName)
w.logger = &logger
w.logger = slog.New(slog.DiscardHandler)
}
return nil
@@ -95,7 +89,7 @@ func (w *Worker) applyDefaults() error {
// This update marks the ephemeral runner so that the controller would have more context
// about the ephemeral runner that should not be deleted when scaling down.
// It returns an error if there is any issue with updating the job information.
func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
func (w *Scaler) HandleJobStarted(ctx context.Context, jobInfo *scaleset.JobStarted) error {
w.logger.Info("Updating job info for the runner",
"runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName,
@@ -106,6 +100,8 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
"jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestID)
w.dirty = true
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
if err != nil {
return fmt.Errorf("failed to marshal empty ephemeral runner: %w", err)
@@ -158,6 +154,11 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
return nil
}
func (w *Scaler) HandleJobCompleted(ctx context.Context, msg *scaleset.JobCompleted) error {
w.dirty = true
return nil
}
// HandleDesiredRunnerCount handles the desired runner count by scaling the ephemeral runner set.
// The function calculates the target runner count based on the minimum and maximum runner count configuration.
// If the target runner count is the same as the last patched count, it skips patching and returns nil.
@@ -165,8 +166,8 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
// The function then scales the ephemeral runner set by applying the merge patch.
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
patchID := w.setDesiredWorkerState(count, jobsCompleted)
func (w *Scaler) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
patchID := w.setDesiredWorkerState(count)
original, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
@@ -183,13 +184,13 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsComple
patch, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: w.lastPatch,
Replicas: w.targetRunners,
PatchID: patchID,
},
},
)
if err != nil {
w.logger.Error(err, "could not marshal patch ephemeral runner set")
w.logger.Error("could not marshal patch ephemeral runner set", "error", err.Error())
return 0, err
}
@@ -220,30 +221,31 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsComple
"name", w.config.EphemeralRunnerSetName,
"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
)
return w.lastPatch, nil
return w.targetRunners, nil
}
// calculateDesiredState calculates the desired state of the worker based on the desired count and the number of jobs completed.
func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
w.patchSeq++
desiredPatchID := w.patchSeq
func (w *Scaler) setDesiredWorkerState(count int) int {
dirty := w.dirty
w.dirty = false
if count == 0 && jobsCompleted == 0 { // empty batch
targetRunnerCount = max(w.lastPatch, targetRunnerCount)
if targetRunnerCount == w.config.MinRunners {
// We have an empty batch, and the last patch was the min runners.
// Since this is an empty batch, and we are at the min runners, they should all be idle.
// If controller created few more pods on accident (during scale down events),
// this situation allows the controller to scale down to the min runners.
// However, it is important to keep the patch sequence increasing so we don't ignore one batch.
desiredPatchID = 0
}
if w.patchSeq == math.MaxInt32 {
w.patchSeq = 0
}
w.patchSeq++
w.lastPatch = targetRunnerCount
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
oldTargetRunners := w.targetRunners
w.targetRunners = targetRunnerCount
desiredPatchID := w.patchSeq
if !dirty && targetRunnerCount == oldTargetRunners && targetRunnerCount == w.config.MinRunners {
// If there were no events sent, and the target runner count
// is the same as the last patched count, we can force the state.
//
// TODO: consider removing w.config.MinRunners from the equation, as it is not relevant to the decision of whether to patch or not.
desiredPatchID = 0
}
w.logger.Info(
"Calculated target runner count",
@@ -251,8 +253,7 @@ func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int {
"decision", targetRunnerCount,
"min", w.config.MinRunners,
"max", w.config.MaxRunners,
"currentRunnerCount", w.lastPatch,
"jobsCompleted", jobsCompleted,
"currentRunnerCount", w.targetRunners,
)
return desiredPatchID

View File

@@ -1,326 +1,334 @@
package worker
package scaler
import (
"log/slog"
"math"
"testing"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
)
var discardLogger = slog.New(slog.DiscardHandler)
func TestSetDesiredWorkerState_MinMaxDefaults(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 0,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("init calculate with acquired 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, w.lastPatch)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("init calculate with acquired 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 1, w.lastPatch)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("increment patch when job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("increment patch when called with same parameters", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(1, 0)
patchID = w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("calculate desired scale when acquired > 0 and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
w.dirty = true
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the last state when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("adjust when acquired == 0 and completed == 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
w.dirty = true
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.False(t, w.dirty)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 1,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
patchID := w.setDesiredWorkerState(2)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("desired patch is 0 but sequence continues on empty batch and min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 4, w.lastPatch)
assert.Equal(t, 4, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 0,
MaxRunners: 5,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 2, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
patchID := w.setDesiredWorkerState(2)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
patchID := w.setDesiredWorkerState(6)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 5, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
w.setDesiredWorkerState(5, 0)
assert.Equal(t, 5, w.lastPatch)
w.setDesiredWorkerState(5)
assert.False(t, w.dirty)
assert.Equal(t, 5, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count > max and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(6, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(6)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 5, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale back to 0 when count was > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
patchID := w.setDesiredWorkerState(6)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
patchID = w.setDesiredWorkerState(0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinMaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 1,
MaxRunners: 3,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale to min when count == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
patchID := w.setDesiredWorkerState(2)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(4, 0)
patchID := w.setDesiredWorkerState(4)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 2, w.patchSeq)
})
}

View File

@@ -28,6 +28,9 @@ const (
LabelKeyKubernetesComponent = "app.kubernetes.io/component"
LabelKeyKubernetesVersion = "app.kubernetes.io/version"
// Well-known Kubernetes node labels
LabelKeyKubernetesOS = "kubernetes.io/os"
// Github labels
LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"

View File

@@ -179,7 +179,7 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
EphemeralRunnerSetName: autoscalingListener.Spec.EphemeralRunnerSetName,
MaxRunners: autoscalingListener.Spec.MaxRunners,
MinRunners: autoscalingListener.Spec.MinRunners,
RunnerScaleSetId: autoscalingListener.Spec.RunnerScaleSetId,
RunnerScaleSetID: autoscalingListener.Spec.RunnerScaleSetId,
RunnerScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
ServerRootCA: cert,
LogLevel: scaleSetListenerLogLevel,
@@ -244,6 +244,9 @@ func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
terminationGracePeriodSeconds := int64(60)
podSpec := corev1.PodSpec{
ServiceAccountName: serviceAccount.Name,
NodeSelector: map[string]string{
LabelKeyKubernetesOS: "linux",
},
Containers: []corev1.Container{
{
Name: autoscalingListenerContainerName,
@@ -353,13 +356,16 @@ func mergeListenerPodWithTemplate(pod *corev1.Pod, tmpl *corev1.PodTemplateSpec)
pod.Spec.ImagePullSecrets = tmpl.Spec.ImagePullSecrets
}
if tmpl.Spec.NodeSelector != nil {
pod.Spec.NodeSelector = tmpl.Spec.NodeSelector
}
pod.Spec.Volumes = append(pod.Spec.Volumes, tmpl.Spec.Volumes...)
pod.Spec.InitContainers = tmpl.Spec.InitContainers
pod.Spec.EphemeralContainers = tmpl.Spec.EphemeralContainers
pod.Spec.TerminationGracePeriodSeconds = tmpl.Spec.TerminationGracePeriodSeconds
pod.Spec.ActiveDeadlineSeconds = tmpl.Spec.ActiveDeadlineSeconds
pod.Spec.DNSPolicy = tmpl.Spec.DNSPolicy
pod.Spec.NodeSelector = tmpl.Spec.NodeSelector
pod.Spec.NodeName = tmpl.Spec.NodeName
pod.Spec.HostNetwork = tmpl.Spec.HostNetwork
pod.Spec.HostPID = tmpl.Spec.HostPID

View File

@@ -242,3 +242,111 @@ func TestOwnershipRelationships(t *testing.T) {
assert.Equal(t, true, *ownerRef.Controller, "Controller flag should be true")
assert.Equal(t, true, *ownerRef.BlockOwnerDeletion, "BlockOwnerDeletion flag should be true")
}
func TestListenerPodNodeSelector(t *testing.T) {
autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-scale-set",
Namespace: "test-ns",
Labels: map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIDAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/org/repo",
},
}
b := ResourceBuilder{}
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
require.NoError(t, err)
listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
listenerServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
t.Run("default listener pod has linux nodeSelector", func(t *testing.T) {
pod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, nil)
require.NoError(t, err)
require.NotNil(t, pod.Spec.NodeSelector)
assert.Equal(t, "linux", pod.Spec.NodeSelector[LabelKeyKubernetesOS],
"listener pod should default to linux nodeSelector")
})
t.Run("nil listenerTemplate preserves linux nodeSelector", func(t *testing.T) {
listenerNoTemplate := listener.DeepCopy()
listenerNoTemplate.Spec.Template = nil
pod, err := b.newScaleSetListenerPod(listenerNoTemplate, &corev1.Secret{}, listenerServiceAccount, nil)
require.NoError(t, err)
require.NotNil(t, pod.Spec.NodeSelector)
assert.Equal(t, "linux", pod.Spec.NodeSelector[LabelKeyKubernetesOS],
"listener pod should keep linux nodeSelector when no template is provided")
})
t.Run("listenerTemplate with nil nodeSelector preserves linux default", func(t *testing.T) {
listenerWithTemplate := listener.DeepCopy()
listenerWithTemplate.Spec.Template = &corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
// NodeSelector intentionally nil
Tolerations: []corev1.Toleration{
{Key: "example.com/test", Operator: corev1.TolerationOpExists},
},
},
}
pod, err := b.newScaleSetListenerPod(listenerWithTemplate, &corev1.Secret{}, listenerServiceAccount, nil)
require.NoError(t, err)
require.NotNil(t, pod.Spec.NodeSelector,
"linux nodeSelector should not be cleared by template with nil nodeSelector")
assert.Equal(t, "linux", pod.Spec.NodeSelector[LabelKeyKubernetesOS])
assert.Len(t, pod.Spec.Tolerations, 1, "other template fields should still be applied")
})
t.Run("listenerTemplate with explicit nodeSelector overrides default", func(t *testing.T) {
listenerWithTemplate := listener.DeepCopy()
listenerWithTemplate.Spec.Template = &corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
NodeSelector: map[string]string{
LabelKeyKubernetesOS: "linux",
"custom-label/pool": "listeners",
},
},
}
pod, err := b.newScaleSetListenerPod(listenerWithTemplate, &corev1.Secret{}, listenerServiceAccount, nil)
require.NoError(t, err)
require.NotNil(t, pod.Spec.NodeSelector)
assert.Equal(t, "linux", pod.Spec.NodeSelector[LabelKeyKubernetesOS])
assert.Equal(t, "listeners", pod.Spec.NodeSelector["custom-label/pool"],
"explicit template nodeSelector should be applied")
})
t.Run("listenerTemplate with empty nodeSelector overrides default", func(t *testing.T) {
listenerWithTemplate := listener.DeepCopy()
listenerWithTemplate.Spec.Template = &corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
NodeSelector: map[string]string{},
},
}
pod, err := b.newScaleSetListenerPod(listenerWithTemplate, &corev1.Secret{}, listenerServiceAccount, nil)
require.NoError(t, err)
// An explicitly set empty map is non-nil, so it overrides the default.
// This is intentional: the user explicitly opted out of nodeSelector constraints.
assert.NotNil(t, pod.Spec.NodeSelector)
assert.Empty(t, pod.Spec.NodeSelector,
"explicitly empty nodeSelector should override the linux default")
})
}

View File

@@ -34,7 +34,6 @@ const (
// Header used to propagate capacity information to the back-end
const HeaderScaleSetMaxCapacity = "X-ScaleSetMaxCapacity"
//go:generate mockery --inpackage --name=ActionsService
type ActionsService interface {
GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error)
GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error)

View File

@@ -1,428 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package actions
import (
context "context"
uuid "github.com/google/uuid"
mock "github.com/stretchr/testify/mock"
)
// MockActionsService is an autogenerated mock type for the ActionsService type
type MockActionsService struct {
mock.Mock
}
// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
func (_m *MockActionsService) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
var r0 []int64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) ([]int64, error)); ok {
return rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok {
r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok {
r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
func (_m *MockActionsService) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, owner)
var r0 *RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string) (*RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, owner)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string) *RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, owner)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
r1 = rf(ctx, runnerScaleSetId, owner)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSet
func (_m *MockActionsService) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) {
ret := _m.Called(ctx, runnerScaleSet)
var r0 *RunnerScaleSet
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *RunnerScaleSet) (*RunnerScaleSet, error)); ok {
return rf(ctx, runnerScaleSet)
}
if rf, ok := ret.Get(0).(func(context.Context, *RunnerScaleSet) *RunnerScaleSet); ok {
r0 = rf(ctx, runnerScaleSet)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSet)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *RunnerScaleSet) error); ok {
r1 = rf(ctx, runnerScaleSet)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
func (_m *MockActionsService) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *MockActionsService) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *MockActionsService) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) error {
ret := _m.Called(ctx, runnerScaleSetId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
r0 = rf(ctx, runnerScaleSetId)
} else {
r0 = ret.Error(0)
}
return r0
}
// GenerateJitRunnerConfig provides a mock function with given fields: ctx, jitRunnerSetting, scaleSetId
func (_m *MockActionsService) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error) {
ret := _m.Called(ctx, jitRunnerSetting, scaleSetId)
var r0 *RunnerScaleSetJitRunnerConfig
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *RunnerScaleSetJitRunnerSetting, int) (*RunnerScaleSetJitRunnerConfig, error)); ok {
return rf(ctx, jitRunnerSetting, scaleSetId)
}
if rf, ok := ret.Get(0).(func(context.Context, *RunnerScaleSetJitRunnerSetting, int) *RunnerScaleSetJitRunnerConfig); ok {
r0 = rf(ctx, jitRunnerSetting, scaleSetId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSetJitRunnerConfig)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *RunnerScaleSetJitRunnerSetting, int) error); ok {
r1 = rf(ctx, jitRunnerSetting, scaleSetId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *MockActionsService) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*AcquirableJobList, error) {
ret := _m.Called(ctx, runnerScaleSetId)
var r0 *AcquirableJobList
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int) (*AcquirableJobList, error)); ok {
return rf(ctx, runnerScaleSetId)
}
if rf, ok := ret.Get(0).(func(context.Context, int) *AcquirableJobList); ok {
r0 = rf(ctx, runnerScaleSetId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*AcquirableJobList)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
r1 = rf(ctx, runnerScaleSetId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
func (_m *MockActionsService) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) {
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
var r0 *RunnerScaleSetMessage
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) (*RunnerScaleSetMessage, error)); ok {
return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
}
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) *RunnerScaleSetMessage); ok {
r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSetMessage)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string, string, int64, int) error); ok {
r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetRunner provides a mock function with given fields: ctx, runnerId
func (_m *MockActionsService) GetRunner(ctx context.Context, runnerId int64) (*RunnerReference, error) {
ret := _m.Called(ctx, runnerId)
var r0 *RunnerReference
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int64) (*RunnerReference, error)); ok {
return rf(ctx, runnerId)
}
if rf, ok := ret.Get(0).(func(context.Context, int64) *RunnerReference); ok {
r0 = rf(ctx, runnerId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerReference)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
r1 = rf(ctx, runnerId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetRunnerByName provides a mock function with given fields: ctx, runnerName
func (_m *MockActionsService) GetRunnerByName(ctx context.Context, runnerName string) (*RunnerReference, error) {
ret := _m.Called(ctx, runnerName)
var r0 *RunnerReference
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (*RunnerReference, error)); ok {
return rf(ctx, runnerName)
}
if rf, ok := ret.Get(0).(func(context.Context, string) *RunnerReference); ok {
r0 = rf(ctx, runnerName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerReference)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, runnerName)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetRunnerGroupByName provides a mock function with given fields: ctx, runnerGroup
func (_m *MockActionsService) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) {
ret := _m.Called(ctx, runnerGroup)
var r0 *RunnerGroup
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (*RunnerGroup, error)); ok {
return rf(ctx, runnerGroup)
}
if rf, ok := ret.Get(0).(func(context.Context, string) *RunnerGroup); ok {
r0 = rf(ctx, runnerGroup)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerGroup)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, runnerGroup)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetRunnerScaleSet provides a mock function with given fields: ctx, runnerGroupId, runnerScaleSetName
func (_m *MockActionsService) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error) {
ret := _m.Called(ctx, runnerGroupId, runnerScaleSetName)
var r0 *RunnerScaleSet
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, string) (*RunnerScaleSet, error)); ok {
return rf(ctx, runnerGroupId, runnerScaleSetName)
}
if rf, ok := ret.Get(0).(func(context.Context, int, string) *RunnerScaleSet); ok {
r0 = rf(ctx, runnerGroupId, runnerScaleSetName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSet)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
r1 = rf(ctx, runnerGroupId, runnerScaleSetName)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetRunnerScaleSetById provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *MockActionsService) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error) {
ret := _m.Called(ctx, runnerScaleSetId)
var r0 *RunnerScaleSet
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int) (*RunnerScaleSet, error)); ok {
return rf(ctx, runnerScaleSetId)
}
if rf, ok := ret.Get(0).(func(context.Context, int) *RunnerScaleSet); ok {
r0 = rf(ctx, runnerScaleSetId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSet)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
r1 = rf(ctx, runnerScaleSetId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *MockActionsService) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*RunnerScaleSetSession, error) {
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 *RunnerScaleSetSession
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) (*RunnerScaleSetSession, error)); ok {
return rf(ctx, runnerScaleSetId, sessionId)
}
if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *RunnerScaleSetSession); ok {
r0 = rf(ctx, runnerScaleSetId, sessionId)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSetSession)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok {
r1 = rf(ctx, runnerScaleSetId, sessionId)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RemoveRunner provides a mock function with given fields: ctx, runnerId
func (_m *MockActionsService) RemoveRunner(ctx context.Context, runnerId int64) error {
ret := _m.Called(ctx, runnerId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
r0 = rf(ctx, runnerId)
} else {
r0 = ret.Error(0)
}
return r0
}
// SetUserAgent provides a mock function with given fields: info
func (_m *MockActionsService) SetUserAgent(info UserAgentInfo) {
_m.Called(info)
}
// UpdateRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSetId, runnerScaleSet
func (_m *MockActionsService) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) {
ret := _m.Called(ctx, runnerScaleSetId, runnerScaleSet)
var r0 *RunnerScaleSet
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int, *RunnerScaleSet) (*RunnerScaleSet, error)); ok {
return rf(ctx, runnerScaleSetId, runnerScaleSet)
}
if rf, ok := ret.Get(0).(func(context.Context, int, *RunnerScaleSet) *RunnerScaleSet); ok {
r0 = rf(ctx, runnerScaleSetId, runnerScaleSet)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSet)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int, *RunnerScaleSet) error); ok {
r1 = rf(ctx, runnerScaleSetId, runnerScaleSet)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewMockActionsService creates a new instance of MockActionsService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockActionsService(t interface {
mock.TestingT
Cleanup(func())
}) *MockActionsService {
mock := &MockActionsService{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -1,108 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package actions
import (
context "context"
mock "github.com/stretchr/testify/mock"
)
// MockSessionService is an autogenerated mock type for the SessionService type
type MockSessionService struct {
mock.Mock
}
// AcquireJobs provides a mock function with given fields: ctx, requestIds
func (_m *MockSessionService) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) {
ret := _m.Called(ctx, requestIds)
var r0 []int64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, []int64) ([]int64, error)); ok {
return rf(ctx, requestIds)
}
if rf, ok := ret.Get(0).(func(context.Context, []int64) []int64); ok {
r0 = rf(ctx, requestIds)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
if rf, ok := ret.Get(1).(func(context.Context, []int64) error); ok {
r1 = rf(ctx, requestIds)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Close provides a mock function with given fields:
func (_m *MockSessionService) Close() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteMessage provides a mock function with given fields: ctx, messageId
func (_m *MockSessionService) DeleteMessage(ctx context.Context, messageId int64) error {
ret := _m.Called(ctx, messageId)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
r0 = rf(ctx, messageId)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetMessage provides a mock function with given fields: ctx, lastMessageId, maxCapacity
func (_m *MockSessionService) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) {
ret := _m.Called(ctx, lastMessageId, maxCapacity)
var r0 *RunnerScaleSetMessage
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int64, int) (*RunnerScaleSetMessage, error)); ok {
return rf(ctx, lastMessageId, maxCapacity)
}
if rf, ok := ret.Get(0).(func(context.Context, int64, int) *RunnerScaleSetMessage); ok {
r0 = rf(ctx, lastMessageId, maxCapacity)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*RunnerScaleSetMessage)
}
}
if rf, ok := ret.Get(1).(func(context.Context, int64, int) error); ok {
r1 = rf(ctx, lastMessageId, maxCapacity)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewMockSessionService creates a new instance of MockSessionService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockSessionService(t interface {
mock.TestingT
Cleanup(func())
}) *MockSessionService {
mock := &MockSessionService{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

1539
github/actions/mocks_test.go Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -5,7 +5,6 @@ import (
"io"
)
//go:generate mockery --inpackage --name=SessionService
type SessionService interface {
GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error)
DeleteMessage(ctx context.Context, messageId int64) error

31
go.mod
View File

@@ -1,11 +1,12 @@
module github.com/actions/actions-runner-controller
go 1.25.1
go 1.25.3
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0
github.com/actions/scaleset v0.1.1-0.20260218224657-feb84c6d04fb
github.com/bradleyfalzon/ghinstallation/v2 v2.17.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch v5.9.11+incompatible
@@ -86,11 +87,13 @@ require (
github.com/aws/smithy-go v1.23.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.1.0 // indirect
github.com/brunoga/deep v1.2.4 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
@@ -111,6 +114,7 @@ require (
github.com/go-openapi/swag/yamlutils v0.25.1 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/gonvenience/bunt v1.4.2 // indirect
@@ -130,19 +134,33 @@ require (
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/homeport/dyff v1.10.2 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.7.6 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jedib0t/go-pretty/v6 v6.6.7 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/parsers/yaml v0.1.0 // indirect
github.com/knadh/koanf/providers/env v1.0.0 // indirect
github.com/knadh/koanf/providers/file v1.1.2 // indirect
github.com/knadh/koanf/providers/posflag v0.1.0 // indirect
github.com/knadh/koanf/providers/structs v0.1.0 // indirect
github.com/knadh/koanf/v2 v2.3.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
github.com/mattn/go-ciede2000 v0.0.0-20170301095244-782e8c62fec3 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-zglob v0.0.6 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/mitchellh/hashstructure v1.1.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
@@ -154,14 +172,21 @@ require (
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rs/zerolog v1.33.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sergi/go-diff v1.4.0 // indirect
github.com/spf13/cobra v1.10.1 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/texttheater/golang-levenshtein v1.0.1 // indirect
github.com/urfave/cli/v2 v2.27.7 // indirect
github.com/vektra/mockery/v3 v3.6.1 // indirect
github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
@@ -171,7 +196,7 @@ require (
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.13.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.39.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
@@ -185,4 +210,6 @@ require (
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
tool github.com/vektra/mockery/v3
replace github.com/gregjones/httpcache => github.com/actions-runner-controller/httpcache v0.2.0

62
go.sum
View File

@@ -25,6 +25,8 @@ github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBi
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
github.com/actions/scaleset v0.1.1-0.20260218224657-feb84c6d04fb h1:9jQ9/kHm00UTvZf5MiQcZgIVounynwFEhh0wCV3Ts00=
github.com/actions/scaleset v0.1.1-0.20260218224657-feb84c6d04fb/go.mod h1:ncR5vzCCTUSyLgvclAtZ5dRBgF6qwA2nbTfTXmOJp84=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I=
@@ -106,10 +108,14 @@ github.com/boombuler/barcode v1.1.0 h1:ChaYjBR63fr4LFyGn8E8nt7dBSt3MiU3zMOZqFvVk
github.com/boombuler/barcode v1.1.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradleyfalzon/ghinstallation/v2 v2.17.0 h1:SmbUK/GxpAspRjSQbB6ARvH+ArzlNzTtHydNyXUQ6zg=
github.com/bradleyfalzon/ghinstallation/v2 v2.17.0/go.mod h1:vuD/xvJT9Y+ZVZRv4HQ42cMyPFIYqpc7AbB4Gvt/DlY=
github.com/brunoga/deep v1.2.4 h1:Aj9E9oUbE+ccbyh35VC/NHlzzjfIVU69BXu2mt2LmL8=
github.com/brunoga/deep v1.2.4/go.mod h1:GDV6dnXqn80ezsLSZ5Wlv1PdKAWAO4L5PnKYtv2dgaI=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -124,6 +130,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -177,8 +185,11 @@ github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI6
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
@@ -249,6 +260,10 @@ github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3
github.com/homeport/dyff v1.10.2 h1:XyB+D0KVwjbUFTZYIkvPtsImwkfh+ObH2CEdEHTqdr4=
github.com/homeport/dyff v1.10.2/go.mod h1:0kIjL/JOGaXigzrLY6kcl5esSStbAa99r6GzEvr7lrs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
@@ -257,6 +272,8 @@ github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jedib0t/go-pretty/v6 v6.6.7 h1:m+LbHpm0aIAPLzLbMfn8dc3Ht8MW7lsSO4MPItz/Uuo=
github.com/jedib0t/go-pretty/v6 v6.6.7/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU=
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -269,6 +286,20 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/parsers/yaml v0.1.0 h1:ZZ8/iGfRLvKSaMEECEBPM1HQslrZADk8fP1XFUxVI5w=
github.com/knadh/koanf/parsers/yaml v0.1.0/go.mod h1:cvbUDC7AL23pImuQP0oRw/hPuccrNBS2bps8asS0CwY=
github.com/knadh/koanf/providers/env v1.0.0 h1:ufePaI9BnWH+ajuxGGiJ8pdTG0uLEUWC7/HDDPGLah0=
github.com/knadh/koanf/providers/env v1.0.0/go.mod h1:mzFyRZueYhb37oPmC1HAv/oGEEuyvJDA98r3XAa8Gak=
github.com/knadh/koanf/providers/file v1.1.2 h1:aCC36YGOgV5lTtAFz2qkgtWdeQsgfxUkxDOe+2nQY3w=
github.com/knadh/koanf/providers/file v1.1.2/go.mod h1:/faSBcv2mxPVjFrXck95qeoyoZ5myJ6uxN8OOVNJJCI=
github.com/knadh/koanf/providers/posflag v0.1.0 h1:mKJlLrKPcAP7Ootf4pBZWJ6J+4wHYujwipe7Ie3qW6U=
github.com/knadh/koanf/providers/posflag v0.1.0/go.mod h1:SYg03v/t8ISBNrMBRMlojH8OsKowbkXV7giIbBVgbz0=
github.com/knadh/koanf/providers/structs v0.1.0 h1:wJRteCNn1qvLtE5h8KQBvLJovidSdntfdyIbbCzEyE0=
github.com/knadh/koanf/providers/structs v0.1.0/go.mod h1:sw2YZ3txUcqA3Z27gPlmmBzWn1h8Nt9O6EP/91MkcWE=
github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM=
github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -286,18 +317,26 @@ github.com/mattn/go-ciede2000 v0.0.0-20170301095244-782e8c62fec3 h1:BXxTozrOU8zg
github.com/mattn/go-ciede2000 v0.0.0-20170301095244-782e8c62fec3/go.mod h1:x1uk6vxTiVuNt6S5R2UYgdhpj3oKojXvOXauHZ7dEnI=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-zglob v0.0.6 h1:mP8RnmCgho4oaUYDIDn6GNxYk+qJGUs8fJLn+twYj2A=
github.com/mattn/go-zglob v0.0.6/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0=
github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -339,12 +378,21 @@ github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9Z
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -370,10 +418,18 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/vektra/mockery/v3 v3.6.1 h1:YyqAXihdNML8y6SJnvPKYr+2HAHvBjdvqFu/fMYlX8g=
github.com/vektra/mockery/v3 v3.6.1/go.mod h1:Oti3Df0WP8wwT31yuVri3QNsDeMUQU5Q4QEg8EabaBw=
github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 h1:JwtAtbp7r/7QSyGz8mKUbYJBg2+6Cd7OjM8o/GNOcVo=
github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74/go.mod h1:RmMWU37GKR2s6pgrIEB4ixgpVCt/cf7dnJv3fuH1J1c=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg=
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -424,8 +480,10 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
@@ -434,8 +492,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=