Switch client to scaleset library for the listener and update mocks (#4383)

This commit is contained in:
Nikola Jokic
2026-02-24 14:17:31 +01:00
committed by GitHub
parent c6e4c94a6a
commit 8b7fd9ffef
30 changed files with 1129 additions and 3309 deletions

View File

@@ -1,143 +0,0 @@
package app
import (
"context"
"errors"
"fmt"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"golang.org/x/sync/errgroup"
)
// App is responsible for initializing required components and running the app.
type App struct {
	// configured fields
	config *config.Config // parsed listener configuration
	logger logr.Logger    // root logger; components get named children of it

	// initialized fields
	listener Listener               // receives scale set messages and dispatches them
	worker   Worker                 // applies desired runner counts / job starts
	metrics  metrics.ServerExporter // optional; nil when no metrics address is configured
}
//go:generate mockery

// Listener polls the actions service for scale set messages and forwards
// them to the given handler until ctx is cancelled or an error occurs.
type Listener interface {
	Listen(ctx context.Context, handler listener.Handler) error
}
//go:generate mockery

// Worker reacts to scale set events: job starts and changes to the desired
// number of runners. HandleDesiredRunnerCount returns the resulting desired
// count (callers publish it as a metric).
type Worker interface {
	HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
	HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error)
}
// New builds a fully wired App from the given configuration. It validates the
// config, parses the GitHub URL, and constructs the logger, actions client,
// optional metrics exporter, worker, and listener in that order, returning an
// error as soon as any step fails.
func New(config config.Config) (*App, error) {
	if err := config.Validate(); err != nil {
		return nil, fmt.Errorf("failed to validate config: %w", err)
	}

	app := &App{
		config: &config,
	}

	gh, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
	if err != nil {
		return nil, fmt.Errorf("failed to parse GitHub config from URL: %w", err)
	}

	baseLogger, err := config.Logger()
	if err != nil {
		return nil, fmt.Errorf("failed to create logger: %w", err)
	}
	app.logger = baseLogger.WithName("listener-app")

	client, err := config.ActionsClient(app.logger)
	if err != nil {
		return nil, fmt.Errorf("failed to create actions client: %w", err)
	}

	// Metrics are optional: the exporter is only created when an address is
	// configured; otherwise app.metrics stays nil and Run skips the server.
	if config.MetricsAddr != "" {
		app.metrics = metrics.NewExporter(metrics.ExporterConfig{
			ScaleSetName:      config.EphemeralRunnerSetName,
			ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
			Enterprise:        gh.Enterprise,
			Organization:      gh.Organization,
			Repository:        gh.Repository,
			ServerAddr:        config.MetricsAddr,
			ServerEndpoint:    config.MetricsEndpoint,
			Metrics:           config.Metrics,
			Logger:            app.logger.WithName("metrics exporter"),
		})
	}

	w, err := worker.New(
		worker.Config{
			EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
			EphemeralRunnerSetName:      config.EphemeralRunnerSetName,
			MaxRunners:                  config.MaxRunners,
			MinRunners:                  config.MinRunners,
		},
		worker.WithLogger(app.logger.WithName("worker")),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create new kubernetes worker: %w", err)
	}
	app.worker = w

	l, err := listener.New(listener.Config{
		Client:     client,
		ScaleSetID: app.config.RunnerScaleSetId,
		MinRunners: app.config.MinRunners,
		MaxRunners: app.config.MaxRunners,
		Logger:     app.logger.WithName("listener"),
		Metrics:    app.metrics,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create new listener: %w", err)
	}
	app.listener = l

	app.logger.Info("app initialized")
	return app, nil
}
// Run starts the listener and, when configured, the metrics server, and
// blocks until either exits or ctx is cancelled. It returns the first error
// encountered, or nil when everything shuts down cleanly.
func (app *App) Run(ctx context.Context) error {
	var errs []error
	if app.worker == nil {
		errs = append(errs, fmt.Errorf("worker not initialized"))
	}
	if app.listener == nil {
		errs = append(errs, fmt.Errorf("listener not initialized"))
	}
	if err := errors.Join(errs...); err != nil {
		return fmt.Errorf("app not initialized: %w", err)
	}

	g, ctx := errgroup.WithContext(ctx)
	// The metrics server gets a derived context so that a listener exit —
	// successful or not — also shuts the metrics server down.
	metricsCtx, cancelMetrics := context.WithCancelCause(ctx)

	g.Go(func() error {
		app.logger.Info("Starting listener")
		listenerErr := app.listener.Listen(ctx, app.worker)
		// Don't wrap a nil error: fmt.Errorf("%w", nil) yields a malformed
		// cause. Record a plain cause when the listener exited cleanly.
		if listenerErr != nil {
			cancelMetrics(fmt.Errorf("listener exited: %w", listenerErr))
		} else {
			cancelMetrics(errors.New("listener exited"))
		}
		return listenerErr
	})

	if app.metrics != nil {
		g.Go(func() error {
			app.logger.Info("Starting metrics server")
			return app.metrics.ListenAndServe(metricsCtx)
		})
	}

	return g.Wait()
}

View File

@@ -1,85 +0,0 @@
package app
import (
"context"
"errors"
"testing"
appmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/app/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
metricsMocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// TestApp_Run exercises App.Run wiring: the initialization guard, error
// propagation from the listener, clean listener exit, and cancellation of the
// listener when the metrics server fails.
func TestApp_Run(t *testing.T) {
	t.Parallel()

	t.Run("ListenerWorkerGuard", func(t *testing.T) {
		// Run must refuse to start unless both worker and listener are set.
		invalidApps := []*App{
			{},
			{worker: &worker.Worker{}},
			{listener: &listener.Listener{}},
		}

		for _, app := range invalidApps {
			assert.Error(t, app.Run(context.Background()))
		}
	})

	t.Run("ExitsOnListenerError", func(t *testing.T) {
		listener := appmocks.NewMockListener(t)
		worker := appmocks.NewMockWorker(t)

		listener.On("Listen", mock.Anything, mock.Anything).Return(errors.New("listener error")).Once()

		app := &App{
			listener: listener,
			worker:   worker,
		}

		err := app.Run(context.Background())
		assert.Error(t, err)
	})

	// Renamed from "ExitsOnListenerNil": the listener here is non-nil — this
	// case covers Listen returning a nil error (clean exit).
	t.Run("ExitsOnListenerNilError", func(t *testing.T) {
		listener := appmocks.NewMockListener(t)
		worker := appmocks.NewMockWorker(t)

		listener.On("Listen", mock.Anything, mock.Anything).Return(nil).Once()

		app := &App{
			listener: listener,
			worker:   worker,
		}

		err := app.Run(context.Background())
		assert.NoError(t, err)
	})

	t.Run("CancelListenerOnMetricsServerError", func(t *testing.T) {
		listener := appmocks.NewMockListener(t)
		worker := appmocks.NewMockWorker(t)
		metrics := metricsMocks.NewServerPublisher(t)

		ctx := context.Background()

		// The listener blocks on its context; the metrics failure must cancel
		// it via the errgroup context for Run to return.
		listener.On("Listen", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
			ctx := args.Get(0).(context.Context)
			go func() {
				<-ctx.Done()
			}()
		}).Return(nil).Once()

		metrics.On("ListenAndServe", mock.Anything).Return(errors.New("metrics server error")).Once()

		app := &App{
			listener: listener,
			worker:   worker,
			metrics:  metrics,
		}

		err := app.Run(ctx)
		assert.Error(t, err)
	})
}

View File

@@ -1,96 +0,0 @@
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package mocks
import (
"context"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
mock "github.com/stretchr/testify/mock"
)
// NewMockListener creates a new instance of MockListener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockListener(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockListener {
	mock := &MockListener{}
	mock.Mock.Test(t)

	// Expectations are asserted automatically when the test finishes.
	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

// MockListener is an autogenerated mock type for the Listener type
type MockListener struct {
	mock.Mock
}

// MockListener_Expecter is the typed entry point for declaring expectations.
type MockListener_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns an expecter for setting type-safe expectations on the mock.
func (_m *MockListener) EXPECT() *MockListener_Expecter {
	return &MockListener_Expecter{mock: &_m.Mock}
}
// Listen provides a mock function for the type MockListener
func (_mock *MockListener) Listen(ctx context.Context, handler listener.Handler) error {
	ret := _mock.Called(ctx, handler)

	if len(ret) == 0 {
		panic("no return value specified for Listen")
	}

	var r0 error
	// Prefer a registered function-valued return; fall back to a plain error.
	if returnFunc, ok := ret.Get(0).(func(context.Context, listener.Handler) error); ok {
		r0 = returnFunc(ctx, handler)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// MockListener_Listen_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Listen'
type MockListener_Listen_Call struct {
	*mock.Call
}

// Listen is a helper method to define mock.On call
//   - ctx context.Context
//   - handler listener.Handler
func (_e *MockListener_Expecter) Listen(ctx interface{}, handler interface{}) *MockListener_Listen_Call {
	return &MockListener_Listen_Call{Call: _e.mock.On("Listen", ctx, handler)}
}

// Run registers a typed callback invoked with the call's arguments.
func (_c *MockListener_Listen_Call) Run(run func(ctx context.Context, handler listener.Handler)) *MockListener_Listen_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil arguments are left as typed zero values to avoid panicking
		// type assertions on untyped nil.
		var arg0 context.Context
		if args[0] != nil {
			arg0 = args[0].(context.Context)
		}
		var arg1 listener.Handler
		if args[1] != nil {
			arg1 = args[1].(listener.Handler)
		}
		run(
			arg0,
			arg1,
		)
	})
	return _c
}

// Return sets the value Listen yields.
func (_c *MockListener_Listen_Call) Return(err error) *MockListener_Listen_Call {
	_c.Call.Return(err)
	return _c
}

// RunAndReturn registers a function that both observes the arguments and
// produces the return value.
func (_c *MockListener_Listen_Call) RunAndReturn(run func(ctx context.Context, handler listener.Handler) error) *MockListener_Listen_Call {
	_c.Call.Return(run)
	return _c
}

View File

@@ -1,168 +0,0 @@
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package mocks
import (
"context"
"github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// NewMockWorker creates a new instance of MockWorker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockWorker(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockWorker {
	mock := &MockWorker{}
	mock.Mock.Test(t)

	// Expectations are asserted automatically when the test finishes.
	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

// MockWorker is an autogenerated mock type for the Worker type
type MockWorker struct {
	mock.Mock
}

// MockWorker_Expecter is the typed entry point for declaring expectations.
type MockWorker_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns an expecter for setting type-safe expectations on the mock.
func (_m *MockWorker) EXPECT() *MockWorker_Expecter {
	return &MockWorker_Expecter{mock: &_m.Mock}
}
// HandleDesiredRunnerCount provides a mock function for the type MockWorker
func (_mock *MockWorker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
	ret := _mock.Called(ctx, count, jobsCompleted)

	if len(ret) == 0 {
		panic("no return value specified for HandleDesiredRunnerCount")
	}

	var r0 int
	var r1 error
	// A single function returning both values takes precedence; otherwise
	// each return position is resolved independently.
	if returnFunc, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
		return returnFunc(ctx, count, jobsCompleted)
	}
	if returnFunc, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
		r0 = returnFunc(ctx, count, jobsCompleted)
	} else {
		r0 = ret.Get(0).(int)
	}
	if returnFunc, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
		r1 = returnFunc(ctx, count, jobsCompleted)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// MockWorker_HandleDesiredRunnerCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleDesiredRunnerCount'
type MockWorker_HandleDesiredRunnerCount_Call struct {
	*mock.Call
}

// HandleDesiredRunnerCount is a helper method to define mock.On call
//   - ctx context.Context
//   - count int
//   - jobsCompleted int
func (_e *MockWorker_Expecter) HandleDesiredRunnerCount(ctx interface{}, count interface{}, jobsCompleted interface{}) *MockWorker_HandleDesiredRunnerCount_Call {
	return &MockWorker_HandleDesiredRunnerCount_Call{Call: _e.mock.On("HandleDesiredRunnerCount", ctx, count, jobsCompleted)}
}

// Run registers a typed callback invoked with the call's arguments.
func (_c *MockWorker_HandleDesiredRunnerCount_Call) Run(run func(ctx context.Context, count int, jobsCompleted int)) *MockWorker_HandleDesiredRunnerCount_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil arguments are left as typed zero values to avoid panicking
		// type assertions on untyped nil.
		var arg0 context.Context
		if args[0] != nil {
			arg0 = args[0].(context.Context)
		}
		var arg1 int
		if args[1] != nil {
			arg1 = args[1].(int)
		}
		var arg2 int
		if args[2] != nil {
			arg2 = args[2].(int)
		}
		run(
			arg0,
			arg1,
			arg2,
		)
	})
	return _c
}

// Return sets the values HandleDesiredRunnerCount yields.
func (_c *MockWorker_HandleDesiredRunnerCount_Call) Return(n int, err error) *MockWorker_HandleDesiredRunnerCount_Call {
	_c.Call.Return(n, err)
	return _c
}

// RunAndReturn registers a function that both observes the arguments and
// produces the return values.
func (_c *MockWorker_HandleDesiredRunnerCount_Call) RunAndReturn(run func(ctx context.Context, count int, jobsCompleted int) (int, error)) *MockWorker_HandleDesiredRunnerCount_Call {
	_c.Call.Return(run)
	return _c
}
// HandleJobStarted provides a mock function for the type MockWorker
func (_mock *MockWorker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
	ret := _mock.Called(ctx, jobInfo)

	if len(ret) == 0 {
		panic("no return value specified for HandleJobStarted")
	}

	var r0 error
	// Prefer a registered function-valued return; fall back to a plain error.
	if returnFunc, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
		r0 = returnFunc(ctx, jobInfo)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// MockWorker_HandleJobStarted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleJobStarted'
type MockWorker_HandleJobStarted_Call struct {
	*mock.Call
}

// HandleJobStarted is a helper method to define mock.On call
//   - ctx context.Context
//   - jobInfo *actions.JobStarted
func (_e *MockWorker_Expecter) HandleJobStarted(ctx interface{}, jobInfo interface{}) *MockWorker_HandleJobStarted_Call {
	return &MockWorker_HandleJobStarted_Call{Call: _e.mock.On("HandleJobStarted", ctx, jobInfo)}
}

// Run registers a typed callback invoked with the call's arguments.
func (_c *MockWorker_HandleJobStarted_Call) Run(run func(ctx context.Context, jobInfo *actions.JobStarted)) *MockWorker_HandleJobStarted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil arguments are left as typed zero values to avoid panicking
		// type assertions on untyped nil.
		var arg0 context.Context
		if args[0] != nil {
			arg0 = args[0].(context.Context)
		}
		var arg1 *actions.JobStarted
		if args[1] != nil {
			arg1 = args[1].(*actions.JobStarted)
		}
		run(
			arg0,
			arg1,
		)
	})
	return _c
}

// Return sets the value HandleJobStarted yields.
func (_c *MockWorker_HandleJobStarted_Call) Return(err error) *MockWorker_HandleJobStarted_Call {
	_c.Call.Return(err)
	return _c
}

// RunAndReturn registers a function that both observes the arguments and
// produces the return value.
func (_c *MockWorker_HandleJobStarted_Call) RunAndReturn(run func(ctx context.Context, jobInfo *actions.JobStarted) error) *MockWorker_HandleJobStarted_Call {
	_c.Call.Return(run)
	return _c
}

View File

@@ -5,21 +5,23 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"net/url"
"os"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/actions/actions-runner-controller/vault"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
"github.com/go-logr/logr"
"github.com/actions/scaleset"
"golang.org/x/net/http/httpproxy"
)
const appName = "ghalistener"
type Config struct {
ConfigureUrl string `json:"configure_url"`
VaultType vault.VaultType `json:"vault_type"`
@@ -34,7 +36,7 @@ type Config struct {
EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"`
MaxRunners int `json:"max_runners"`
MinRunners int `json:"min_runners"`
RunnerScaleSetId int `json:"runner_scale_set_id"`
RunnerScaleSetID int `json:"runner_scale_set_id"`
RunnerScaleSetName string `json:"runner_scale_set_name"`
ServerRootCA string `json:"server_root_ca"`
LogLevel string `json:"log_level"`
@@ -108,8 +110,8 @@ func (c *Config) Validate() error {
return fmt.Errorf("EphemeralRunnerSetNamespace %q or EphemeralRunnerSetName %q is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
}
if c.RunnerScaleSetId == 0 {
return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetId)
if c.RunnerScaleSetID == 0 {
return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetID)
}
if c.MaxRunners < c.MinRunners {
@@ -134,40 +136,51 @@ func (c *Config) Validate() error {
return nil
}
func (c *Config) Logger() (logr.Logger, error) {
logLevel := string(logging.LogLevelDebug)
if c.LogLevel != "" {
logLevel = c.LogLevel
func (c *Config) Logger() (*slog.Logger, error) {
var lvl slog.Level
switch strings.ToLower(c.LogLevel) {
case "debug":
lvl = slog.LevelDebug
case "info":
lvl = slog.LevelInfo
case "warn":
lvl = slog.LevelWarn
case "error":
lvl = slog.LevelError
default:
return nil, fmt.Errorf("invalid log level: %s", c.LogLevel)
}
logFormat := string(logging.LogFormatText)
if c.LogFormat != "" {
logFormat = c.LogFormat
var logger *slog.Logger
switch c.LogFormat {
case "json":
logger = slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
AddSource: true,
Level: lvl,
}))
case "text":
logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
AddSource: true,
Level: lvl,
}))
default:
return nil, fmt.Errorf("invalid log format: %s", c.LogFormat)
}
logger, err := logging.NewLogger(logLevel, logFormat)
if err != nil {
return logr.Logger{}, fmt.Errorf("NewLogger failed: %w", err)
}
return logger, nil
return logger.With("app", appName), nil
}
func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.ClientOption) (*actions.Client, error) {
var creds actions.ActionsAuth
switch c.Token {
case "":
creds.AppCreds = &actions.GitHubAppAuth{
AppID: c.AppID,
AppInstallationID: c.AppInstallationID,
AppPrivateKey: c.AppPrivateKey,
}
default:
creds.Token = c.Token
func (c *Config) ActionsClient(logger *slog.Logger, clientOptions ...scaleset.HTTPOption) (*scaleset.Client, error) {
systemInfo := scaleset.SystemInfo{
System: "actions-runner-controller",
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: c.RunnerScaleSetID,
Subsystem: appName,
}
options := append([]actions.ClientOption{
actions.WithLogger(logger),
options := append([]scaleset.HTTPOption{
scaleset.WithLogger(logger),
}, clientOptions...)
if c.ServerRootCA != "" {
@@ -181,31 +194,47 @@ func (c *Config) ActionsClient(logger logr.Logger, clientOptions ...actions.Clie
return nil, fmt.Errorf("failed to parse root certificate")
}
options = append(options, actions.WithRootCAs(pool))
options = append(options, scaleset.WithRootCAs(pool))
}
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) {
options = append(options, scaleset.WithProxy(func(req *http.Request) (*url.URL, error) {
return proxyFunc(req.URL)
}))
client, err := actions.NewClient(c.ConfigureUrl, &creds, options...)
if err != nil {
return nil, fmt.Errorf("failed to create actions client: %w", err)
var client *scaleset.Client
switch c.Token {
case "":
c, err := scaleset.NewClientWithGitHubApp(
scaleset.ClientWithGitHubAppConfig{
GitHubConfigURL: c.ConfigureUrl,
GitHubAppAuth: scaleset.GitHubAppAuth{
ClientID: c.AppConfig.AppID,
InstallationID: c.AppConfig.AppInstallationID,
PrivateKey: c.AppConfig.AppPrivateKey,
},
SystemInfo: systemInfo,
},
options...,
)
if err != nil {
return nil, fmt.Errorf("failed to instantiate client with GitHub App auth: %w", err)
}
client = c
default:
c, err := scaleset.NewClientWithPersonalAccessToken(
scaleset.NewClientWithPersonalAccessTokenConfig{
GitHubConfigURL: c.ConfigureUrl,
PersonalAccessToken: c.Token,
SystemInfo: systemInfo,
},
options...,
)
if err != nil {
return nil, fmt.Errorf("failed to instantiate client with PAT auth: %w", err)
}
client = c
}
client.SetUserAgent(actions.UserAgentInfo{
Version: build.Version,
CommitSHA: build.CommitSHA,
ScaleSetID: c.RunnerScaleSetId,
HasProxy: hasProxy(),
Subsystem: "ghalistener",
})
return client, nil
}
func hasProxy() bool {
proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
return proxyFunc != nil
}

View File

@@ -3,21 +3,23 @@ package config_test
import (
"context"
"crypto/tls"
"encoding/json"
"log/slog"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/testserver"
"github.com/go-logr/logr"
"github.com/actions/scaleset"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var discardLogger = slog.New(slog.DiscardHandler)
func TestCustomerServerRootCA(t *testing.T) {
ctx := context.Background()
certsFolder := filepath.Join(
@@ -59,7 +61,7 @@ func TestCustomerServerRootCA(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard())
client, err := config.ActionsClient(discardLogger)
require.NoError(t, err)
_, err = client.GetRunnerScaleSet(ctx, 1, "test")
require.NoError(t, err)
@@ -67,18 +69,19 @@ func TestCustomerServerRootCA(t *testing.T) {
}
func TestProxySettings(t *testing.T) {
assertHasProxy := func(t *testing.T, debugInfo string, want bool) {
type debugInfoContent struct {
HasProxy bool `json:"has_proxy"`
}
var got debugInfoContent
err := json.Unmarshal([]byte(debugInfo), &got)
require.NoError(t, err)
assert.Equal(t, want, got.HasProxy)
}
t.Run("http", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
os.Setenv("http_proxy", "http://proxy:8080")
defer os.Setenv("http_proxy", prevProxy)
config := config.Config{
@@ -88,29 +91,15 @@ func TestProxySettings(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard())
client, err := config.ActionsClient(discardLogger)
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.True(t, wentThroughProxy)
assertHasProxy(t, client.DebugInfo(), true)
})
t.Run("https", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("https_proxy")
os.Setenv("https_proxy", proxy.URL)
os.Setenv("https_proxy", "https://proxy:443")
defer os.Setenv("https_proxy", prevProxy)
config := config.Config{
@@ -120,32 +109,16 @@ func TestProxySettings(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
client, err := config.ActionsClient(
discardLogger,
scaleset.WithRetryMax(0),
)
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
// proxy doesn't support https
assert.Error(t, err)
assert.True(t, wentThroughProxy)
assertHasProxy(t, client.DebugInfo(), true)
})
t.Run("no_proxy", func(t *testing.T) {
wentThroughProxy := false
proxy := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
wentThroughProxy = true
}))
t.Cleanup(func() {
proxy.Close()
})
prevProxy := os.Getenv("http_proxy")
os.Setenv("http_proxy", proxy.URL)
defer os.Setenv("http_proxy", prevProxy)
prevNoProxy := os.Getenv("no_proxy")
os.Setenv("no_proxy", "example.com")
defer os.Setenv("no_proxy", prevNoProxy)
@@ -157,14 +130,9 @@ func TestProxySettings(t *testing.T) {
},
}
client, err := config.ActionsClient(logr.Discard())
client, err := config.ActionsClient(discardLogger)
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
require.NoError(t, err)
_, err = client.Do(req)
require.NoError(t, err)
assert.False(t, wentThroughProxy)
assertHasProxy(t, client.DebugInfo(), true)
})
}

View File

@@ -13,7 +13,7 @@ func TestConfigValidationMinMax(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 5,
MaxRunners: 2,
AppConfig: &appconfig.AppConfig{
@@ -29,7 +29,7 @@ func TestConfigValidationMissingToken(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: missing app config"
@@ -49,7 +49,7 @@ func TestConfigValidationAppKey(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
@@ -66,7 +66,7 @@ func TestConfigValidationAppKey(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
@@ -85,7 +85,7 @@ func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: both PAT and GitHub App credentials provided. should only provide one"
@@ -97,7 +97,7 @@ func TestConfigValidation(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
AppConfig: &appconfig.AppConfig{
@@ -114,7 +114,7 @@ func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
}
err := config.Validate()
@@ -128,7 +128,7 @@ func TestConfigValidationWithVaultConfig(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,
@@ -143,7 +143,7 @@ func TestConfigValidationWithVaultConfig(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultType("invalid_vault_type"),
@@ -158,7 +158,7 @@ func TestConfigValidationWithVaultConfig(t *testing.T) {
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
RunnerScaleSetID: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,

View File

@@ -1,458 +0,0 @@
package listener
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"time"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/google/uuid"
)
const (
	// sessionCreationMaxRetries caps how many times session creation is
	// retried when the service responds with a conflict (HTTP 409).
	sessionCreationMaxRetries = 10
)

// message types received from the scale set message queue
const (
	messageTypeJobAvailable = "JobAvailable"
	messageTypeJobAssigned  = "JobAssigned"
	messageTypeJobStarted   = "JobStarted"
	messageTypeJobCompleted = "JobCompleted"
)
//go:generate mockery

// Client is the subset of the actions service API the Listener needs:
// session lifecycle, message queue access, and job acquisition.
type Client interface {
	GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error)
	CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error)
	GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error)
	DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error
	AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error)
	RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error)
	DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error
}
// Config holds the dependencies and scaling bounds for a Listener.
type Config struct {
	Client     Client // actions service client (required)
	ScaleSetID int    // scale set to listen on (required, non-zero)
	MinRunners int    // lower bound; must be >= 0
	MaxRunners int    // upper bound; must be >= 0; 0 disables the min<=max check
	Logger     logr.Logger
	Metrics    metrics.Publisher // optional; metrics.Discard is used when nil
}
// Validate reports whether the listener configuration is internally
// consistent, returning the first problem found or nil when usable.
func (c *Config) Validate() error {
	switch {
	case c.Client == nil:
		return errors.New("client is required")
	case c.ScaleSetID == 0:
		return errors.New("scaleSetID is required")
	case c.MinRunners < 0:
		return errors.New("minRunners must be greater than or equal to 0")
	case c.MaxRunners < 0:
		return errors.New("maxRunners must be greater than or equal to 0")
	case c.MaxRunners > 0 && c.MinRunners > c.MaxRunners:
		// MaxRunners == 0 is exempt from the bound check.
		return errors.New("minRunners must be less than or equal to maxRunners")
	default:
		return nil
	}
}
// The Listener's role is to manage all interactions with the actions service.
// It receives messages and processes them using the given handler.
type Listener struct {
	// configured fields
	scaleSetID int               // The ID of the scale set associated with the listener.
	client     Client            // The client used to interact with the scale set.
	metrics    metrics.Publisher // The publisher used to publish metrics.

	// internal fields
	logger   logr.Logger // The logger used for logging.
	hostname string      // The hostname of the listener; used as the session owner.

	// updated fields
	lastMessageID int64                           // The ID of the last processed message.
	maxCapacity   int                             // The maximum number of runners that can be created.
	session       *actions.RunnerScaleSetSession // The session for managing the runner scale set.
}
// New validates the config and builds a Listener. When no metrics publisher
// is supplied, a discard publisher is used; when the hostname cannot be
// determined, a random UUID stands in as the session owner.
func New(config Config) (*Listener, error) {
	if err := config.Validate(); err != nil {
		return nil, fmt.Errorf("invalid config: %w", err)
	}

	l := &Listener{
		scaleSetID:  config.ScaleSetID,
		client:      config.Client,
		logger:      config.Logger,
		metrics:     metrics.Discard,
		maxCapacity: config.MaxRunners,
	}
	if config.Metrics != nil {
		l.metrics = config.Metrics
	}
	l.metrics.PublishStatic(config.MinRunners, config.MaxRunners)

	host, err := os.Hostname()
	if err != nil {
		host = uuid.NewString()
		l.logger.Info("Failed to get hostname, fallback to uuid", "uuid", host, "error", err)
	}
	l.hostname = host

	return l, nil
}
//go:generate mockery

// Handler consumes the events the Listener extracts from queue messages:
// job starts and desired-runner-count updates.
type Handler interface {
	HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
	HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error)
}
// Listen listens for incoming messages and handles them using the provided handler.
// It continuously listens for messages until the context is cancelled.
// The initial message contains the current statistics and acquirable jobs, if any.
// The handler is responsible for handling the initial message and subsequent messages.
// If an error occurs during any step, Listen returns an error.
func (l *Listener) Listen(ctx context.Context, handler Handler) error {
	if err := l.createSession(ctx); err != nil {
		return fmt.Errorf("createSession failed: %w", err)
	}

	// Best-effort cleanup so the queue is released without waiting for the
	// session to expire server-side.
	defer func() {
		if err := l.deleteMessageSession(); err != nil {
			l.logger.Error(err, "failed to delete message session")
		}
	}()

	// Synthesize an initial message from the session statistics so the
	// handler reconciles runner count before any real queue message arrives.
	initialMessage := &actions.RunnerScaleSetMessage{
		MessageId:   0,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics:  l.session.Statistics,
		Body:        "",
	}

	// NOTE(review): this nil check runs before Statistics is dereferenced, so
	// it is safe, but it would read better before initialMessage is built.
	if l.session.Statistics == nil {
		return fmt.Errorf("session statistics is nil")
	}
	l.metrics.PublishStatistics(initialMessage.Statistics)

	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs, 0)
	if err != nil {
		return fmt.Errorf("handling initial message failed: %w", err)
	}
	l.metrics.PublishDesiredRunners(desiredRunners)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		msg, err := l.getMessage(ctx)
		if err != nil {
			return fmt.Errorf("failed to get message: %w", err)
		}

		// A nil message (presumably an empty long poll — confirm against
		// getMessage) still lets the handler reconcile with zero new jobs.
		if msg == nil {
			_, err := handler.HandleDesiredRunnerCount(ctx, 0, 0)
			if err != nil {
				return fmt.Errorf("handling nil message failed: %w", err)
			}
			continue
		}

		// Remove cancellation from the context to avoid cancelling the message handling.
		if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil {
			return fmt.Errorf("failed to handle message: %w", err)
		}
	}
}
// handleMessage processes a single scale set message end to end: it parses the
// batched job events, publishes statistics, acquires any newly available jobs,
// deletes the message from the queue, and then notifies the handler of started
// jobs and of the new desired runner count.
func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
	parsedMsg, err := l.parseMessage(ctx, msg)
	if err != nil {
		return fmt.Errorf("failed to parse message: %w", err)
	}
	l.metrics.PublishStatistics(parsedMsg.statistics)
	// Acquire newly available jobs first so the scale set claims them.
	if len(parsedMsg.jobsAvailable) > 0 {
		acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
		if err != nil {
			return fmt.Errorf("failed to acquire jobs: %w", err)
		}
		l.logger.Info("Jobs are acquired", "count", len(acquiredJobIDs), "requestIds", fmt.Sprint(acquiredJobIDs))
	}
	for _, jobCompleted := range parsedMsg.jobsCompleted {
		l.metrics.PublishJobCompleted(jobCompleted)
	}
	// The message is deleted before the handler calls below, so a handler
	// failure will not cause this message to be processed again.
	// NOTE(review): at-most-once by design, presumably — confirm.
	l.lastMessageID = msg.MessageId
	if err := l.deleteLastMessage(ctx); err != nil {
		return fmt.Errorf("failed to delete message: %w", err)
	}
	for _, jobStarted := range parsedMsg.jobsStarted {
		if err := handler.HandleJobStarted(ctx, jobStarted); err != nil {
			return fmt.Errorf("failed to handle job started: %w", err)
		}
		l.metrics.PublishJobStarted(jobStarted)
	}
	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs, len(parsedMsg.jobsCompleted))
	if err != nil {
		return fmt.Errorf("failed to handle desired runner count: %w", err)
	}
	l.metrics.PublishDesiredRunners(desiredRunners)
	return nil
}
// createSession establishes a message session with the Actions service.
// An HTTP 409 (conflict) response is treated as retryable — another session
// may still be winding down — and is retried up to sessionCreationMaxRetries
// times with a 30-second pause between attempts. Any other failure is
// terminal. On success the session is stored on the listener and its initial
// statistics are logged.
func (l *Listener) createSession(ctx context.Context) error {
	var session *actions.RunnerScaleSetSession
	var attempt int
	for {
		s, err := l.client.CreateMessageSession(ctx, l.scaleSetID, l.hostname)
		if err == nil {
			session = s
			break
		}
		// Only a client-side conflict is retryable.
		clientErr := &actions.HttpClientSideError{}
		if !errors.As(err, &clientErr) || clientErr.Code != http.StatusConflict {
			return fmt.Errorf("failed to create session: %w", err)
		}
		attempt++
		if attempt >= sessionCreationMaxRetries {
			return fmt.Errorf("failed to create session after %d retries: %w", attempt, err)
		}
		l.logger.Info("Unable to create message session. Will try again in 30 seconds", "error", err.Error())
		select {
		case <-ctx.Done():
			return fmt.Errorf("context cancelled: %w", ctx.Err())
		case <-time.After(30 * time.Second):
		}
	}
	statistics, err := json.Marshal(session.Statistics)
	if err != nil {
		return fmt.Errorf("failed to marshal statistics: %w", err)
	}
	l.logger.Info("Current runner scale set statistics.", "statistics", string(statistics))
	l.session = session
	return nil
}
// getMessage long-polls the message queue for the next message after
// lastMessageID, with maxCapacity forwarded as the capacity hint. If the queue
// token has expired, the session is refreshed and the call is retried exactly
// once. A nil message with a nil error means the poll returned no work.
func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
	l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
	msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
	if err == nil { // if NO error
		return msg, nil
	}
	// Only an expired queue token is recoverable here; any other error
	// surfaces immediately.
	expiredError := &actions.MessageQueueTokenExpiredError{}
	if !errors.As(err, &expiredError) {
		return nil, fmt.Errorf("failed to get next message: %w", err)
	}
	if err := l.refreshSession(ctx); err != nil {
		return nil, err
	}
	l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
	msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
	if err != nil {
		return nil, fmt.Errorf("failed to get next message after message session refresh: %w", err)
	}
	return msg, nil
}
// deleteLastMessage removes the most recently processed message (identified by
// lastMessageID) from the message queue, transparently refreshing the session
// once if the queue token has expired.
func (l *Listener) deleteLastMessage(ctx context.Context) error {
	l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
	deleteErr := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
	if deleteErr == nil {
		return nil
	}
	// Anything other than an expired queue token is a hard failure.
	expired := &actions.MessageQueueTokenExpiredError{}
	if !errors.As(deleteErr, &expired) {
		return fmt.Errorf("failed to delete last message: %w", deleteErr)
	}
	// Token expired: refresh the session and retry exactly once.
	if err := l.refreshSession(ctx); err != nil {
		return err
	}
	if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil {
		return fmt.Errorf("failed to delete last message after message session refresh: %w", err)
	}
	return nil
}
// parsedMessage is the decoded form of a RunnerScaleSetMessage, with the
// batched job events of its JSON body split out by event type.
type parsedMessage struct {
	statistics    *actions.RunnerScaleSetStatistic // scale set statistics carried by the message
	jobsStarted   []*actions.JobStarted            // jobs that began running on a runner
	jobsAvailable []*actions.JobAvailable          // jobs eligible for acquisition
	jobsCompleted []*actions.JobCompleted          // jobs that finished
}
// parseMessage validates a RunnerScaleSetMessage and decodes its JSON body
// into per-type job event slices. Only messages of type
// "RunnerScaleSetJobMessages" with non-nil statistics are accepted.
// Job-assigned events are logged but not collected; unknown event types are
// logged and skipped.
func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
	if msg.MessageType != "RunnerScaleSetJobMessages" {
		l.logger.Info("Skipping message", "messageType", msg.MessageType)
		return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
	}
	l.logger.Info("Processing message", "messageId", msg.MessageId, "messageType", msg.MessageType)
	if msg.Statistics == nil {
		return nil, fmt.Errorf("invalid message: statistics is nil")
	}
	l.logger.Info("New runner scale set statistics.", "statistics", msg.Statistics)
	// The body, when present, is a JSON array of heterogeneous job events.
	// Decode it lazily: first the array, then each element by its declared
	// message type.
	var batchedMessages []json.RawMessage
	if len(msg.Body) > 0 {
		if err := json.Unmarshal([]byte(msg.Body), &batchedMessages); err != nil {
			return nil, fmt.Errorf("failed to unmarshal batched messages: %w", err)
		}
	}
	parsedMsg := &parsedMessage{
		statistics: msg.Statistics,
	}
	for _, msg := range batchedMessages {
		// Peek at the type discriminator before decoding the full event.
		var messageType actions.JobMessageType
		if err := json.Unmarshal(msg, &messageType); err != nil {
			return nil, fmt.Errorf("failed to decode job message type: %w", err)
		}
		switch messageType.MessageType {
		case messageTypeJobAvailable:
			var jobAvailable actions.JobAvailable
			if err := json.Unmarshal(msg, &jobAvailable); err != nil {
				return nil, fmt.Errorf("failed to decode job available: %w", err)
			}
			l.logger.Info("Job available message received", "jobId", jobAvailable.JobID)
			parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
		case messageTypeJobAssigned:
			// Assigned events are informational only; they are not returned.
			var jobAssigned actions.JobAssigned
			if err := json.Unmarshal(msg, &jobAssigned); err != nil {
				return nil, fmt.Errorf("failed to decode job assigned: %w", err)
			}
			l.logger.Info("Job assigned message received", "jobId", jobAssigned.JobID)
		case messageTypeJobStarted:
			var jobStarted actions.JobStarted
			if err := json.Unmarshal(msg, &jobStarted); err != nil {
				return nil, fmt.Errorf("could not decode job started message. %w", err)
			}
			l.logger.Info("Job started message received.", "JobID", jobStarted.JobID, "RunnerId", jobStarted.RunnerID)
			parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
		case messageTypeJobCompleted:
			var jobCompleted actions.JobCompleted
			if err := json.Unmarshal(msg, &jobCompleted); err != nil {
				return nil, fmt.Errorf("failed to decode job completed: %w", err)
			}
			l.logger.Info(
				"Job completed message received.",
				"JobID", jobCompleted.JobID,
				"Result", jobCompleted.Result,
				"RunnerId", jobCompleted.RunnerId,
				"RunnerName", jobCompleted.RunnerName,
			)
			parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
		default:
			l.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
		}
	}
	return parsedMsg, nil
}
// acquireAvailableJobs acquires every job in jobsAvailable by its runner
// request ID, refreshing the message session once if the queue token has
// expired. It returns the request IDs that were actually acquired.
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
	requestIDs := make([]int64, 0, len(jobsAvailable))
	for _, job := range jobsAvailable {
		requestIDs = append(requestIDs, job.RunnerRequestID)
	}
	l.logger.Info("Acquiring jobs", "count", len(requestIDs), "requestIds", fmt.Sprint(requestIDs))
	acquired, acquireErr := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, requestIDs)
	if acquireErr == nil {
		return acquired, nil
	}
	// Anything other than an expired queue token is a hard failure.
	expired := &actions.MessageQueueTokenExpiredError{}
	if !errors.As(acquireErr, &expired) {
		return nil, fmt.Errorf("failed to acquire jobs: %w", acquireErr)
	}
	// Token expired: refresh the session and retry exactly once.
	if err := l.refreshSession(ctx); err != nil {
		return nil, err
	}
	acquired, acquireErr = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, requestIDs)
	if acquireErr != nil {
		return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", acquireErr)
	}
	return acquired, nil
}
// refreshSession refreshes the message session after the queue token has
// expired and stores the refreshed session on the listener. On failure the
// previous session is left in place.
//
// Fix: the log line used to claim "during GetNextMessage", but refreshSession
// is also called from deleteLastMessage and acquireAvailableJobs, so the
// message no longer names a specific caller.
func (l *Listener) refreshSession(ctx context.Context) error {
	l.logger.Info("Message queue token is expired, refreshing...")
	session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
	if err != nil {
		return fmt.Errorf("refresh message session failed. %w", err)
	}
	l.session = session
	return nil
}
// deleteMessageSession tears down the message session. It uses a fresh
// context with a 30-second timeout (not a caller-supplied one) because it is
// invoked during shutdown, when the listener's context may already be
// cancelled.
func (l *Listener) deleteMessageSession() error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	l.logger.Info("Deleting message session")
	err := l.client.DeleteMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
	if err != nil {
		return fmt.Errorf("failed to delete message session: %w", err)
	}
	return nil
}

View File

@@ -1,970 +0,0 @@
package listener
import (
"context"
"encoding/json"
"errors"
"net/http"
"testing"
"time"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestNew verifies listener construction: a zero-value config fails
// validation, and a minimally valid config yields a non-nil listener.
func TestNew(t *testing.T) {
	t.Parallel()
	t.Run("InvalidConfig", func(t *testing.T) {
		t.Parallel()
		// The zero-value config must be rejected by validation.
		var cfg Config
		_, err := New(cfg)
		assert.NotNil(t, err)
	})
	t.Run("ValidConfig", func(t *testing.T) {
		t.Parallel()
		cfg := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		got, err := New(cfg)
		assert.Nil(t, err)
		assert.NotNil(t, got)
	})
}
// TestListener_createSession covers session creation: a terminal (non-409)
// client error, repeated 409 conflicts until the context deadline fires, and
// the success path that stores the returned session on the listener.
func TestListener_createSession(t *testing.T) {
	t.Parallel()
	t.Run("FailOnce", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		// A non-conflict error is not retried and surfaces immediately.
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		err = l.createSession(ctx)
		assert.NotNil(t, err)
	})
	t.Run("FailContext", func(t *testing.T) {
		t.Parallel()
		// The 409 conflict triggers the 30-second retry wait, so the 2-second
		// deadline fires first and createSession reports the context error.
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil,
			&actions.HttpClientSideError{Code: http.StatusConflict}).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		err = l.createSession(ctx)
		assert.True(t, errors.Is(err, context.DeadlineExceeded))
	})
	t.Run("SetsSession", func(t *testing.T) {
		t.Parallel()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		err = l.createSession(context.Background())
		assert.Nil(t, err)
		// The session returned by the client must be stored on the listener.
		assert.Equal(t, session, l.session)
	})
}
// TestListener_getMessage exercises getMessage: a straight success, a
// non-expired client error that surfaces without a refresh, and the
// token-expired path (session refresh followed by either a successful or a
// failing retry).
func TestListener_getMessage(t *testing.T) {
	t.Parallel()
	t.Run("ReceivesMessage", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		want := &actions.RunnerScaleSetMessage{
			MessageId: 1,
		}
		// MaxRunners (10) is forwarded as GetMessage's capacity argument.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{}
		got, err := l.getMessage(ctx)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})
	t.Run("NotExpiredError", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		// An error other than MessageQueueTokenExpiredError must not trigger
		// a session refresh; the mock expects exactly one GetMessage call.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{}
		_, err = l.getMessage(ctx)
		assert.NotNil(t, err)
	})
	t.Run("RefreshAndSucceeds", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// First call fails with an expired token; after the refresh the retry
		// returns the wanted message.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
		want := &actions.RunnerScaleSetMessage{
			MessageId: 1,
		}
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		got, err := l.getMessage(ctx)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})
	t.Run("RefreshAndFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// Both the initial call and the post-refresh retry report expiry, so
		// getMessage gives up after one refresh.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		got, err := l.getMessage(ctx)
		assert.NotNil(t, err)
		assert.Nil(t, got)
	})
}
// TestListener_refreshSession checks that a successful refresh replaces the
// listener's session and that a failed refresh leaves the old session intact.
func TestListener_refreshSession(t *testing.T) {
	t.Parallel()
	t.Run("SuccessfullyRefreshes", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		newUUID := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &newUUID,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		// Seed the listener with a distinct old session to observe the swap.
		oldUUID := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		err = l.refreshSession(ctx)
		assert.Nil(t, err)
		assert.Equal(t, session, l.session)
	})
	t.Run("FailsToRefresh", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, errors.New("error")).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		oldUUID := uuid.New()
		oldSession := &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		l.session = oldSession
		err = l.refreshSession(ctx)
		assert.NotNil(t, err)
		// On failure the previous session must remain in place.
		assert.Equal(t, oldSession, l.session)
	})
}
// TestListener_deleteLastMessage covers deleteLastMessage: the plain success
// and failure paths, plus the token-expired path where the session is
// refreshed and the delete retried (succeeding or failing a second time).
//
// Fix: the RefreshAndSucceeds and RefreshAndFails subtests re-assigned
// config.Client after New(config) had already been called; those assignments
// had no effect and have been removed.
func TestListener_deleteLastMessage(t *testing.T) {
	t.Parallel()
	t.Run("SuccessfullyDeletes", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		// The delete call must carry the listener's lastMessageID (5).
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
			return lastMessageID.(int64) == int64(5)
		})).Return(nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{}
		l.lastMessageID = 5
		err = l.deleteLastMessage(ctx)
		assert.Nil(t, err)
	})
	t.Run("FailsToDelete", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		// A non-expiry error surfaces without a retry.
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("error")).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{}
		l.lastMessageID = 5
		err = l.deleteLastMessage(ctx)
		assert.NotNil(t, err)
	})
	t.Run("RefreshAndSucceeds", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		newUUID := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &newUUID,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// First delete reports an expired token; after the refresh the retry
		// must still target lastMessageID 5 and succeeds.
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Once()
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
			return lastMessageID.(int64) == int64(5)
		})).Return(nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		oldUUID := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		l.lastMessageID = 5
		err = l.deleteLastMessage(ctx)
		assert.NoError(t, err)
	})
	t.Run("RefreshAndFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		newUUID := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &newUUID,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// Both the initial delete and the post-refresh retry report expiry.
		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Twice()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		oldUUID := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &oldUUID,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		l.lastMessageID = 5
		err = l.deleteLastMessage(ctx)
		assert.Error(t, err)
	})
}
// TestListener_Listen covers the Listen loop: a session-creation failure, the
// handler being invoked for the initial (statistics-only) message, and the
// cancellation path after a message has been fetched — including that the
// message delete uses a cancellation-free context.
//
// Fix: the final assertion was assert.ErrorIs(t, context.Canceled, err) — the
// signature is ErrorIs(t, err, target), so the arguments were reversed. That
// only passed by accident because Listen returns the unwrapped
// context.Canceled.
func TestListener_Listen(t *testing.T) {
	t.Parallel()
	t.Run("CreateSessionFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		err = l.Listen(ctx, nil)
		assert.NotNil(t, err)
	})
	t.Run("CallHandleRegardlessOfInitialMessage", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithCancel(context.Background())
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		// The handler must be called for the synthesized initial message even
		// though no queue message was received; cancelling inside the callback
		// stops the loop.
		var called bool
		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Run(
				func(mock.Arguments) {
					called = true
					cancel()
				},
			).
			Once()
		err = l.Listen(ctx, handler)
		assert.True(t, errors.Is(err, context.Canceled))
		assert.True(t, called)
	})
	t.Run("CancelContextAfterGetMessage", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithCancel(context.Background())
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
			MaxRunners: 10,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		client.On("CreateMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Statistics:  &actions.RunnerScaleSetStatistic{},
		}
		// Cancel while a message is in flight: handling must still complete.
		client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).
			Return(msg, nil).
			Run(
				func(mock.Arguments) {
					cancel()
				},
			).
			Once()
		// Ensure delete message is called without cancel
		client.On("DeleteMessage", context.WithoutCancel(ctx), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
		config.Client = client
		// Two calls: one for the initial message, one for the queue message.
		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Once()
		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
			Return(0, nil).
			Once()
		l, err := New(config)
		require.Nil(t, err)
		err = l.Listen(ctx, handler)
		assert.ErrorIs(t, err, context.Canceled)
	})
}
// TestListener_acquireAvailableJobs covers acquireAvailableJobs: a hard
// failure, a first-try success, and the token-expired path where the session
// is refreshed and the acquire retried (succeeding or failing a second time).
// Each case feeds three available jobs with runner request IDs 1, 2, 3.
func TestListener_acquireAvailableJobs(t *testing.T) {
	t.Parallel()
	t.Run("FailingToAcquireJobs", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		// A non-expiry error surfaces immediately without a retry.
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		uuid := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}
		_, err = l.acquireAvailableJobs(ctx, availableJobs)
		assert.Error(t, err)
	})
	t.Run("SuccessfullyAcquiresJobsOnFirstRun", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		jobIDs := []int64{1, 2, 3}
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(jobIDs, nil).Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		uuid := uuid.New()
		l.session = &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              &actions.RunnerScaleSetStatistic{},
		}
		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}
		acquiredJobIDs, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.NoError(t, err)
		assert.Equal(t, []int64{1, 2, 3}, acquiredJobIDs)
	})
	t.Run("RefreshAndSucceeds", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// Second call to AcquireJobs will succeed
		want := []int64{1, 2, 3}
		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}
		// First call to AcquireJobs will fail with a token expired error
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				ids := args.Get(3).([]int64)
				assert.Equal(t, want, ids)
			}).
			Return(nil, &actions.MessageQueueTokenExpiredError{}).
			Once()
		// Second call should succeed
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				ids := args.Get(3).([]int64)
				assert.Equal(t, want, ids)
			}).
			Return(want, nil).
			Once()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		got, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	})
	t.Run("RefreshAndFails", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		config := Config{
			ScaleSetID: 1,
			Metrics:    metrics.Discard,
		}
		client := listenermocks.NewClient(t)
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              nil,
		}
		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
		// Both the initial acquire and the post-refresh retry report expiry.
		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice()
		config.Client = client
		l, err := New(config)
		require.Nil(t, err)
		l.session = &actions.RunnerScaleSetSession{
			SessionId:      &uuid,
			RunnerScaleSet: &actions.RunnerScaleSet{},
		}
		availableJobs := []*actions.JobAvailable{
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 1,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 2,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					RunnerRequestID: 3,
				},
			},
		}
		got, err := l.acquireAvailableJobs(ctx, availableJobs)
		assert.NotNil(t, err)
		assert.Nil(t, got)
	})
}
// TestListener_parseMessage checks message validation (nil statistics and an
// unexpected message type are rejected) and full decoding of a body containing
// all four batched event types. Job-assigned events are decoded but not
// returned, so only available/started/completed slices are asserted.
func TestListener_parseMessage(t *testing.T) {
	t.Run("FailOnEmptyStatistics", func(t *testing.T) {
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Statistics:  nil,
		}
		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		assert.Error(t, err)
		assert.Nil(t, parsedMsg)
	})
	t.Run("FailOnIncorrectMessageType", func(t *testing.T) {
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerMessages", // arbitrary message type
			Statistics:  &actions.RunnerScaleSetStatistic{},
		}
		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		assert.Error(t, err)
		assert.Nil(t, parsedMsg)
	})
	t.Run("ParseAll", func(t *testing.T) {
		msg := &actions.RunnerScaleSetMessage{
			MessageId:   1,
			MessageType: "RunnerScaleSetJobMessages",
			Body:        "",
			Statistics: &actions.RunnerScaleSetStatistic{
				TotalAvailableJobs:     1,
				TotalAcquiredJobs:      2,
				TotalAssignedJobs:      3,
				TotalRunningJobs:       4,
				TotalRegisteredRunners: 5,
				TotalBusyRunners:       6,
				TotalIdleRunners:       7,
			},
		}
		// Build a JSON body holding all four event types in sequence.
		var batchedMessages []any
		jobsAvailable := []*actions.JobAvailable{
			{
				AcquireJobUrl: "https://github.com/example",
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAvailable,
					},
					RunnerRequestID: 1,
				},
			},
			{
				AcquireJobUrl: "https://github.com/example",
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAvailable,
					},
					RunnerRequestID: 2,
				},
			},
		}
		for _, msg := range jobsAvailable {
			batchedMessages = append(batchedMessages, msg)
		}
		// Assigned events are included to confirm they are tolerated, even
		// though parseMessage does not return them.
		jobsAssigned := []*actions.JobAssigned{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAssigned,
					},
					RunnerRequestID: 3,
				},
			},
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobAssigned,
					},
					RunnerRequestID: 4,
				},
			},
		}
		for _, msg := range jobsAssigned {
			batchedMessages = append(batchedMessages, msg)
		}
		jobsStarted := []*actions.JobStarted{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobStarted,
					},
					RunnerRequestID: 5,
				},
				RunnerID:   2,
				RunnerName: "runner2",
			},
		}
		for _, msg := range jobsStarted {
			batchedMessages = append(batchedMessages, msg)
		}
		jobsCompleted := []*actions.JobCompleted{
			{
				JobMessageBase: actions.JobMessageBase{
					JobMessageType: actions.JobMessageType{
						MessageType: messageTypeJobCompleted,
					},
					RunnerRequestID: 6,
				},
				Result:     "success",
				RunnerId:   1,
				RunnerName: "runner1",
			},
		}
		for _, msg := range jobsCompleted {
			batchedMessages = append(batchedMessages, msg)
		}
		b, err := json.Marshal(batchedMessages)
		require.NoError(t, err)
		msg.Body = string(b)
		l := &Listener{}
		parsedMsg, err := l.parseMessage(context.Background(), msg)
		require.NoError(t, err)
		assert.Equal(t, msg.Statistics, parsedMsg.statistics)
		assert.Equal(t, jobsAvailable, parsedMsg.jobsAvailable)
		assert.Equal(t, jobsStarted, parsedMsg.jobsStarted)
		assert.Equal(t, jobsCompleted, parsedMsg.jobsCompleted)
	})
}

View File

@@ -1,205 +0,0 @@
package listener
import (
"context"
"encoding/json"
"testing"
listenermocks "github.com/actions/actions-runner-controller/cmd/ghalistener/listener/mocks"
metricsmocks "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics/mocks"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestInitialMetrics verifies the metrics published during listener startup:
// the static min/max runner values at construction time, and the statistics
// plus desired-runner metrics derived from the session's initial statistics
// when Listen handles the synthesized initial message.
//
// Fix: the final assertion was assert.ErrorIs(t, context.Canceled, ...) — the
// signature is ErrorIs(t, err, target), so the arguments were reversed. It
// only passed by accident because Listen returns the unwrapped
// context.Canceled.
func TestInitialMetrics(t *testing.T) {
	t.Parallel()
	t.Run("SetStaticMetrics", func(t *testing.T) {
		t.Parallel()
		metrics := metricsmocks.NewMockPublisher(t)
		minRunners := 5
		maxRunners := 10
		// PublishStatic must be called with the configured bounds at New time.
		metrics.On("PublishStatic", minRunners, maxRunners).Once()
		config := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics,
			MinRunners: minRunners,
			MaxRunners: maxRunners,
		}
		l, err := New(config)
		assert.Nil(t, err)
		assert.NotNil(t, l)
	})
	t.Run("InitialMessageStatistics", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithCancel(context.Background())
		sessionStatistics := &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs:     1,
			TotalAcquiredJobs:      2,
			TotalAssignedJobs:      3,
			TotalRunningJobs:       4,
			TotalRegisteredRunners: 5,
			TotalBusyRunners:       6,
			TotalIdleRunners:       7,
		}
		uuid := uuid.New()
		session := &actions.RunnerScaleSetSession{
			SessionId:               &uuid,
			OwnerName:               "example",
			RunnerScaleSet:          &actions.RunnerScaleSet{},
			MessageQueueUrl:         "https://example.com",
			MessageQueueAccessToken: "1234567890",
			Statistics:              sessionStatistics,
		}
		metrics := metricsmocks.NewMockPublisher(t)
		metrics.On("PublishStatic", mock.Anything, mock.Anything).Once()
		metrics.On("PublishStatistics", sessionStatistics).Once()
		// Cancel once the desired-runner metric is published so Listen exits
		// right after handling the initial message.
		metrics.On("PublishDesiredRunners", sessionStatistics.TotalAssignedJobs).
			Run(
				func(mock.Arguments) {
					cancel()
				},
			).Once()
		config := Config{
			Client:     listenermocks.NewClient(t),
			ScaleSetID: 1,
			Metrics:    metrics,
		}
		client := listenermocks.NewClient(t)
		client.On("CreateMessageSession", mock.Anything, mock.Anything, mock.Anything).Return(session, nil).Once()
		client.On("DeleteMessageSession", mock.Anything, session.RunnerScaleSet.Id, session.SessionId).Return(nil).Once()
		config.Client = client
		handler := listenermocks.NewHandler(t)
		handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs, 0).
			Return(sessionStatistics.TotalAssignedJobs, nil).
			Once()
		l, err := New(config)
		assert.Nil(t, err)
		assert.NotNil(t, l)
		assert.ErrorIs(t, l.Listen(ctx, handler), context.Canceled)
	})
}
// TestHandleMessageMetrics checks that handleMessage publishes the message
// statistics, a per-job metric for every started/completed job in the batch,
// and the desired runner count returned by the handler.
func TestHandleMessageMetrics(t *testing.T) {
	t.Parallel()
	// Message carrying the scale-set statistics; its Body is filled in below
	// with the JSON-encoded batch of job events.
	msg := &actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Body:        "",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs:     1,
			TotalAcquiredJobs:      2,
			TotalAssignedJobs:      3,
			TotalRunningJobs:       4,
			TotalRegisteredRunners: 5,
			TotalBusyRunners:       6,
			TotalIdleRunners:       7,
		},
	}

	// Build the batched message body: one started job and two completed jobs.
	var batchedMessages []any
	jobsStarted := []*actions.JobStarted{
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobStarted,
				},
				RunnerRequestID: 8,
			},
			RunnerID:   3,
			RunnerName: "runner3",
		},
	}
	for _, msg := range jobsStarted {
		batchedMessages = append(batchedMessages, msg)
	}

	jobsCompleted := []*actions.JobCompleted{
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobCompleted,
				},
				RunnerRequestID: 6,
			},
			Result:     "success",
			RunnerId:   1,
			RunnerName: "runner1",
		},
		{
			JobMessageBase: actions.JobMessageBase{
				JobMessageType: actions.JobMessageType{
					MessageType: messageTypeJobCompleted,
				},
				RunnerRequestID: 7,
			},
			Result:     "success",
			RunnerId:   2,
			RunnerName: "runner2",
		},
	}
	for _, msg := range jobsCompleted {
		batchedMessages = append(batchedMessages, msg)
	}

	b, err := json.Marshal(batchedMessages)
	require.NoError(t, err)
	msg.Body = string(b)

	// Value the mocked handler reports as the applied desired runner count.
	desiredResult := 4

	metrics := metricsmocks.NewMockPublisher(t)
	metrics.On("PublishStatic", 0, 0).Once()
	metrics.On("PublishStatistics", msg.Statistics).Once()
	metrics.On("PublishJobCompleted", jobsCompleted[0]).Once()
	metrics.On("PublishJobCompleted", jobsCompleted[1]).Once()
	metrics.On("PublishJobStarted", jobsStarted[0]).Once()
	metrics.On("PublishDesiredRunners", desiredResult).Once()

	handler := listenermocks.NewHandler(t)
	handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
	// jobsCompleted has two entries, hence the expected jobsCompleted arg of 2.
	handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 2).Return(desiredResult, nil).Once()

	client := listenermocks.NewClient(t)
	client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()

	config := Config{
		Client:     listenermocks.NewClient(t),
		ScaleSetID: 1,
		Metrics:    metrics,
	}

	l, err := New(config)
	require.NoError(t, err)

	// Inject the expectation-bearing client and a minimal session directly,
	// bypassing Listen's session-creation path.
	l.client = client
	l.session = &actions.RunnerScaleSetSession{
		OwnerName:               "",
		RunnerScaleSet:          &actions.RunnerScaleSet{},
		MessageQueueUrl:         "",
		MessageQueueAccessToken: "",
		Statistics:              &actions.RunnerScaleSetStatistic{},
	}

	err = l.handleMessage(context.Background(), handler, msg)
	require.NoError(t, err)
}

View File

@@ -1,190 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
uuid "github.com/google/uuid"
)
// Client is an autogenerated mock type for the Client type.
//
// NOTE(review): this file is generated by mockery — regenerate it rather
// than hand-editing. Each method follows the same generated pattern: prefer
// a registered function value for the return, otherwise fall back to the
// stored return values.
type Client struct {
	mock.Mock
}

// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
	ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)

	var r0 []int64
	var r1 error
	// A function returning both values at once takes precedence.
	if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) ([]int64, error)); ok {
		return rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok {
		r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]int64)
		}
	}
	if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok {
		r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
	ret := _m.Called(ctx, runnerScaleSetId, owner)

	var r0 *actions.RunnerScaleSetSession
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int, string) (*actions.RunnerScaleSetSession, error)); ok {
		return rf(ctx, runnerScaleSetId, owner)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, string) *actions.RunnerScaleSetSession); ok {
		r0 = rf(ctx, runnerScaleSetId, owner)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
		}
	}
	if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok {
		r1 = rf(ctx, runnerScaleSetId, owner)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok {
		r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
	ret := _m.Called(ctx, runnerScaleSetId, sessionId)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok {
		r0 = rf(ctx, runnerScaleSetId, sessionId)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
	ret := _m.Called(ctx, runnerScaleSetId)

	var r0 *actions.AcquirableJobList
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int) (*actions.AcquirableJobList, error)); ok {
		return rf(ctx, runnerScaleSetId)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int) *actions.AcquirableJobList); ok {
		r0 = rf(ctx, runnerScaleSetId)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.AcquirableJobList)
		}
	}
	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
		r1 = rf(ctx, runnerScaleSetId)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)

	var r0 *actions.RunnerScaleSetMessage
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) (*actions.RunnerScaleSetMessage, error)); ok {
		return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
	}
	if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) *actions.RunnerScaleSetMessage); ok {
		r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.RunnerScaleSetMessage)
		}
	}
	if rf, ok := ret.Get(1).(func(context.Context, string, string, int64, int) error); ok {
		r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
	ret := _m.Called(ctx, runnerScaleSetId, sessionId)

	var r0 *actions.RunnerScaleSetSession
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) (*actions.RunnerScaleSetSession, error)); ok {
		return rf(ctx, runnerScaleSetId, sessionId)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *actions.RunnerScaleSetSession); ok {
		r0 = rf(ctx, runnerScaleSetId, sessionId)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*actions.RunnerScaleSetSession)
		}
	}
	if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok {
		r1 = rf(ctx, runnerScaleSetId, sessionId)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewClient(t interface {
	mock.TestingT
	Cleanup(func())
}) *Client {
	mock := &Client{}
	mock.Mock.Test(t)
	// Unmet .Once()/.Times() expectations fail the test at cleanup time.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}

View File

@@ -1,68 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.
package mocks
import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// Handler is an autogenerated mock type for the Handler type.
//
// NOTE(review): generated by mockery — regenerate rather than hand-editing.
type Handler struct {
	mock.Mock
}

// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
	ret := _m.Called(ctx, count, jobsCompleted)

	var r0 int
	var r1 error
	// A registered function returning both values at once takes precedence.
	if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
		return rf(ctx, count, jobsCompleted)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
		r0 = rf(ctx, count, jobsCompleted)
	} else {
		r0 = ret.Get(0).(int)
	}
	if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
		r1 = rf(ctx, count, jobsCompleted)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
	ret := _m.Called(ctx, jobInfo)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, *actions.JobStarted) error); ok {
		r0 = rf(ctx, jobInfo)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewHandler(t interface {
	mock.TestingT
	Cleanup(func())
}) *Handler {
	mock := &Handler{}
	mock.Mock.Test(t)
	// Unmet expectations fail the test at cleanup time.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}

View File

@@ -8,8 +8,13 @@ import (
"os/signal"
"syscall"
"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/cmd/ghalistener/scaler"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/scaleset/listener"
"github.com/google/uuid"
"golang.org/x/sync/errgroup"
)
func main() {
@@ -28,14 +33,116 @@ func main() {
os.Exit(1)
}
app, err := app.New(*config)
if err != nil {
log.Printf("Failed to initialize app: %v", err)
os.Exit(1)
}
if err := app.Run(ctx); err != nil {
if err := run(ctx, config); err != nil {
log.Printf("Application returned an error: %v", err)
os.Exit(1)
}
}
// run wires up the scaleset listener, the Kubernetes scaler, and (when a
// metrics address is configured) the metrics exporter, then blocks until
// ctx is cancelled or a component returns an error.
func run(ctx context.Context, config *config.Config) error {
	ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
	if err != nil {
		return fmt.Errorf("failed to parse GitHub config from URL: %w", err)
	}

	logger, err := config.Logger()
	if err != nil {
		return fmt.Errorf("failed to create logger: %w", err)
	}

	// The exporter is optional: it is only created when a metrics address
	// is configured; otherwise metricsExporter stays nil and no metrics
	// server is started.
	var metricsExporter metrics.ServerExporter
	if config.MetricsAddr != "" {
		metricsExporter = metrics.NewExporter(metrics.ExporterConfig{
			ScaleSetName:      config.EphemeralRunnerSetName,
			ScaleSetNamespace: config.EphemeralRunnerSetNamespace,
			Enterprise:        ghConfig.Enterprise,
			Organization:      ghConfig.Organization,
			Repository:        ghConfig.Repository,
			ServerAddr:        config.MetricsAddr,
			ServerEndpoint:    config.MetricsEndpoint,
			Metrics:           config.Metrics,
			Logger:            logger.With("component", "metrics exporter"),
		})
	}

	// The hostname identifies the message session owner; fall back to a
	// random UUID when it cannot be determined.
	hostname, err := os.Hostname()
	if err != nil {
		hostname = uuid.NewString()
		logger.Info("Failed to get hostname, fallback to uuid", "uuid", hostname, "error", err)
	}

	scalesetClient, err := config.ActionsClient(logger)
	if err != nil {
		return fmt.Errorf("failed to create actions client: %w", err)
	}

	sessionClient, err := scalesetClient.MessageSessionClient(
		ctx,
		config.RunnerScaleSetID,
		hostname,
	)
	if err != nil {
		return fmt.Errorf("failed to create actions message session client: %w", err)
	}
	// Close with a fresh context: ctx may already be cancelled by the time
	// the deferred cleanup runs.
	defer func() {
		if err := sessionClient.Close(context.Background()); err != nil {
			logger.Error("Failed to close session client", "error", err)
		}
	}()

	var listenerOptions []listener.Option
	if metricsExporter != nil {
		listenerOptions = append(
			listenerOptions,
			listener.WithMetricsRecorder(
				metricsExporter,
			),
		)
		metricsExporter.RecordStatic(config.MinRunners, config.MaxRunners)
	}

	// Named ln/scl so the locals do not shadow the listener and scaler
	// packages they are constructed from.
	ln, err := listener.New(
		sessionClient,
		listener.Config{
			ScaleSetID: config.RunnerScaleSetID,
			MaxRunners: config.MaxRunners,
			Logger:     logger.With("component", "listener"),
		},
		listenerOptions...,
	)
	if err != nil {
		return fmt.Errorf("failed to create new listener: %w", err)
	}

	scl, err := scaler.New(
		scaler.Config{
			EphemeralRunnerSetNamespace: config.EphemeralRunnerSetNamespace,
			EphemeralRunnerSetName:      config.EphemeralRunnerSetName,
			MaxRunners:                  config.MaxRunners,
			MinRunners:                  config.MinRunners,
		},
		scaler.WithLogger(logger.With("component", "worker")),
	)
	if err != nil {
		return fmt.Errorf("failed to create new kubernetes worker: %w", err)
	}

	g, ctx := errgroup.WithContext(ctx)
	// The metrics server must stop when the listener exits — even when the
	// listener returns nil — so its shutdown is driven by a dedicated
	// cancel-with-cause context rather than only by errgroup cancellation.
	metricsCtx, cancelMetrics := context.WithCancelCause(ctx)

	g.Go(func() error {
		logger.Info("Starting listener")
		listenerErr := ln.Run(ctx, scl)
		cancelMetrics(fmt.Errorf("listener exited: %w", listenerErr))
		return listenerErr
	})

	if metricsExporter != nil {
		g.Go(func() error {
			logger.Info("Starting metrics server")
			return metricsExporter.ListenAndServe(metricsCtx)
		})
	}

	return g.Wait()
}

View File

@@ -2,14 +2,13 @@ package metrics
import (
"context"
"errors"
"log/slog"
"net/http"
"strings"
"time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/actions/scaleset"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
@@ -76,7 +75,7 @@ var metricsHelp = metricsHelpRegistry{
},
}
func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
func (e *exporter) jobLabels(jobBase *scaleset.JobMessageBase) prometheus.Labels {
workflowRefInfo := ParseWorkflowRef(jobBase.JobWorkflowRef)
return prometheus.Labels{
labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
@@ -90,40 +89,38 @@ func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels
}
}
func (e *exporter) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
func (e *exporter) completedJobLabels(msg *scaleset.JobCompleted) prometheus.Labels {
l := e.jobLabels(&msg.JobMessageBase)
l[labelKeyJobResult] = msg.Result
return l
}
func (e *exporter) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
func (e *exporter) startedJobLabels(msg *scaleset.JobStarted) prometheus.Labels {
return e.jobLabels(&msg.JobMessageBase)
}
//go:generate mockery
type Publisher interface {
PublishStatic(min, max int)
PublishStatistics(stats *actions.RunnerScaleSetStatistic)
PublishJobStarted(msg *actions.JobStarted)
PublishJobCompleted(msg *actions.JobCompleted)
PublishDesiredRunners(count int)
type Recorder interface {
RecordStatic(min, max int)
RecordStatistics(stats *scaleset.RunnerScaleSetStatistic)
RecordJobStarted(msg *scaleset.JobStarted)
RecordJobCompleted(msg *scaleset.JobCompleted)
RecordDesiredRunners(count int)
}
//go:generate mockery
type ServerExporter interface {
Publisher
Recorder
ListenAndServe(ctx context.Context) error
}
var (
_ Publisher = &discard{}
_ Recorder = &discard{}
_ ServerExporter = &exporter{}
)
var Discard Publisher = &discard{}
var Discard Recorder = &discard{}
type exporter struct {
logger logr.Logger
logger *slog.Logger
scaleSetLabels prometheus.Labels
*metrics
srv *http.Server
@@ -158,7 +155,7 @@ type ExporterConfig struct {
Repository string
ServerAddr string
ServerEndpoint string
Logger logr.Logger
Logger *slog.Logger
Metrics *v1alpha1.MetricsConfig
}
@@ -309,7 +306,7 @@ func NewExporter(config ExporterConfig) ServerExporter {
)
return &exporter{
logger: config.Logger.WithName("metrics"),
logger: config.Logger.With("component", "metrics exporter"),
scaleSetLabels: prometheus.Labels{
labelKeyRunnerScaleSetName: config.ScaleSetName,
labelKeyRunnerScaleSetNamespace: config.ScaleSetNamespace,
@@ -325,9 +322,7 @@ func NewExporter(config ExporterConfig) ServerExporter {
}
}
var errUnknownMetricName = errors.New("unknown metric name")
func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, logger logr.Logger) *metrics {
func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, logger *slog.Logger) *metrics {
logger.Info(
"Registering metrics",
"gauges",
@@ -345,7 +340,11 @@ func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, log
for name, cfg := range config.Gauges {
help, ok := metricsHelp.gauges[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "gauge")
logger.Error(
"unknown metric name",
slog.String("name", name),
slog.String("kind", "gauge"),
)
continue
}
@@ -367,7 +366,11 @@ func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, log
for name, cfg := range config.Counters {
help, ok := metricsHelp.counters[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "counter")
logger.Error(
"unknown metric name",
slog.String("name", name),
slog.String("kind", "counter"),
)
continue
}
c := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
@@ -388,7 +391,11 @@ func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, log
for name, cfg := range config.Histograms {
help, ok := metricsHelp.histograms[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "histogram")
logger.Error(
"unknown metric name",
slog.String("name", name),
slog.String("kind", "histogram"),
)
continue
}
@@ -464,12 +471,12 @@ func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, va
m.histogram.With(labels).Observe(val)
}
func (e *exporter) PublishStatic(min, max int) {
func (e *exporter) RecordStatic(min, max int) {
e.setGauge(MetricMaxRunners, e.scaleSetLabels, float64(max))
e.setGauge(MetricMinRunners, e.scaleSetLabels, float64(min))
}
func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
func (e *exporter) RecordStatistics(stats *scaleset.RunnerScaleSetStatistic) {
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
@@ -477,7 +484,7 @@ func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
}
func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
func (e *exporter) RecordJobStarted(msg *scaleset.JobStarted) {
l := e.startedJobLabels(msg)
e.incCounter(MetricStartedJobsTotal, l)
@@ -485,7 +492,7 @@ func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
}
func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
func (e *exporter) RecordJobCompleted(msg *scaleset.JobCompleted) {
l := e.completedJobLabels(msg)
e.incCounter(MetricCompletedJobsTotal, l)
@@ -493,17 +500,17 @@ func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
}
func (e *exporter) PublishDesiredRunners(count int) {
func (e *exporter) RecordDesiredRunners(count int) {
e.setGauge(MetricDesiredRunners, e.scaleSetLabels, float64(count))
}
type discard struct{}
func (*discard) PublishStatic(int, int) {}
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic) {}
func (*discard) PublishJobStarted(*actions.JobStarted) {}
func (*discard) PublishJobCompleted(*actions.JobCompleted) {}
func (*discard) PublishDesiredRunners(int) {}
func (*discard) RecordStatic(int, int) {}
func (*discard) RecordStatistics(*scaleset.RunnerScaleSetStatistic) {}
func (*discard) RecordJobStarted(*scaleset.JobStarted) {}
func (*discard) RecordJobCompleted(*scaleset.JobCompleted) {}
func (*discard) RecordDesiredRunners(int) {}
var defaultRuntimeBuckets []float64 = []float64{
0.01,

View File

@@ -3,7 +3,7 @@ package metrics
import (
"testing"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/scaleset"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
@@ -22,13 +22,13 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
tests := []struct {
name string
jobBase actions.JobMessageBase
jobBase scaleset.JobMessageBase
wantName string
wantTarget string
}{
{
name: "main branch workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Build and Test",
@@ -40,7 +40,7 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
},
{
name: "feature branch workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "myorg",
RepositoryName: "myrepo",
JobDisplayName: "CI/CD Pipeline",
@@ -52,7 +52,7 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
},
{
name: "pull request workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "PR Checks",
@@ -64,7 +64,7 @@ func TestMetricsWithWorkflowRefParsing(t *testing.T) {
},
{
name: "tag workflow",
jobBase: actions.JobMessageBase{
jobBase: scaleset.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Release",

View File

@@ -1,15 +1,17 @@
package metrics
import (
"log/slog"
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var discardLogger = slog.New(slog.DiscardHandler)
func TestInstallMetrics(t *testing.T) {
metricsConfig := v1alpha1.MetricsConfig{
Counters: map[string]*v1alpha1.CounterMetric{
@@ -74,7 +76,7 @@ func TestInstallMetrics(t *testing.T) {
}
reg := prometheus.NewRegistry()
got := installMetrics(metricsConfig, reg, logr.Discard())
got := installMetrics(metricsConfig, reg, discardLogger)
assert.Len(t, got.counters, 1)
assert.Len(t, got.gauges, 1)
assert.Len(t, got.histograms, 2)
@@ -98,7 +100,7 @@ func TestNewExporter(t *testing.T) {
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
@@ -140,7 +142,7 @@ func TestNewExporter(t *testing.T) {
Repository: "repo",
ServerAddr: "", // empty ServerAddr should default to ":8080"
ServerEndpoint: "",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
@@ -201,7 +203,7 @@ func TestNewExporter(t *testing.T) {
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: &metricsConfig,
}
@@ -244,7 +246,7 @@ func TestExporterConfigDefaults(t *testing.T) {
Repository: "repo",
ServerAddr: "",
ServerEndpoint: "",
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
@@ -257,7 +259,7 @@ func TestExporterConfigDefaults(t *testing.T) {
Repository: "repo",
ServerAddr: ":8080", // default server address
ServerEndpoint: "/metrics", // default server endpoint
Logger: logr.Discard(),
Logger: discardLogger,
Metrics: &defaultMetrics, // when metrics is nil, all default metrics should be registered
}

View File

@@ -1,243 +0,0 @@
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package mocks
import (
"github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// NewMockPublisher creates a new instance of MockPublisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
//
// NOTE(review): this file is generated by mockery — regenerate rather than
// hand-editing.
func NewMockPublisher(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockPublisher {
	mock := &MockPublisher{}
	mock.Mock.Test(t)
	// Unmet expectations fail the test at cleanup time.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}

// MockPublisher is an autogenerated mock type for the Publisher type
type MockPublisher struct {
	mock.Mock
}

type MockPublisher_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns the typed expectation builder for this mock.
func (_m *MockPublisher) EXPECT() *MockPublisher_Expecter {
	return &MockPublisher_Expecter{mock: &_m.Mock}
}

// PublishDesiredRunners provides a mock function for the type MockPublisher
func (_mock *MockPublisher) PublishDesiredRunners(count int) {
	_mock.Called(count)
	return
}

// MockPublisher_PublishDesiredRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishDesiredRunners'
type MockPublisher_PublishDesiredRunners_Call struct {
	*mock.Call
}

// PublishDesiredRunners is a helper method to define mock.On call
//   - count int
func (_e *MockPublisher_Expecter) PublishDesiredRunners(count interface{}) *MockPublisher_PublishDesiredRunners_Call {
	return &MockPublisher_PublishDesiredRunners_Call{Call: _e.mock.On("PublishDesiredRunners", count)}
}

func (_c *MockPublisher_PublishDesiredRunners_Call) Run(run func(count int)) *MockPublisher_PublishDesiredRunners_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		run(
			arg0,
		)
	})
	return _c
}

func (_c *MockPublisher_PublishDesiredRunners_Call) Return() *MockPublisher_PublishDesiredRunners_Call {
	_c.Call.Return()
	return _c
}

func (_c *MockPublisher_PublishDesiredRunners_Call) RunAndReturn(run func(count int)) *MockPublisher_PublishDesiredRunners_Call {
	_c.Run(run)
	return _c
}

// PublishJobCompleted provides a mock function for the type MockPublisher
func (_mock *MockPublisher) PublishJobCompleted(msg *actions.JobCompleted) {
	_mock.Called(msg)
	return
}

// MockPublisher_PublishJobCompleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishJobCompleted'
type MockPublisher_PublishJobCompleted_Call struct {
	*mock.Call
}

// PublishJobCompleted is a helper method to define mock.On call
//   - msg *actions.JobCompleted
func (_e *MockPublisher_Expecter) PublishJobCompleted(msg interface{}) *MockPublisher_PublishJobCompleted_Call {
	return &MockPublisher_PublishJobCompleted_Call{Call: _e.mock.On("PublishJobCompleted", msg)}
}

func (_c *MockPublisher_PublishJobCompleted_Call) Run(run func(msg *actions.JobCompleted)) *MockPublisher_PublishJobCompleted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *actions.JobCompleted
		if args[0] != nil {
			arg0 = args[0].(*actions.JobCompleted)
		}
		run(
			arg0,
		)
	})
	return _c
}

func (_c *MockPublisher_PublishJobCompleted_Call) Return() *MockPublisher_PublishJobCompleted_Call {
	_c.Call.Return()
	return _c
}

func (_c *MockPublisher_PublishJobCompleted_Call) RunAndReturn(run func(msg *actions.JobCompleted)) *MockPublisher_PublishJobCompleted_Call {
	_c.Run(run)
	return _c
}

// PublishJobStarted provides a mock function for the type MockPublisher
func (_mock *MockPublisher) PublishJobStarted(msg *actions.JobStarted) {
	_mock.Called(msg)
	return
}

// MockPublisher_PublishJobStarted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishJobStarted'
type MockPublisher_PublishJobStarted_Call struct {
	*mock.Call
}

// PublishJobStarted is a helper method to define mock.On call
//   - msg *actions.JobStarted
func (_e *MockPublisher_Expecter) PublishJobStarted(msg interface{}) *MockPublisher_PublishJobStarted_Call {
	return &MockPublisher_PublishJobStarted_Call{Call: _e.mock.On("PublishJobStarted", msg)}
}

func (_c *MockPublisher_PublishJobStarted_Call) Run(run func(msg *actions.JobStarted)) *MockPublisher_PublishJobStarted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *actions.JobStarted
		if args[0] != nil {
			arg0 = args[0].(*actions.JobStarted)
		}
		run(
			arg0,
		)
	})
	return _c
}

func (_c *MockPublisher_PublishJobStarted_Call) Return() *MockPublisher_PublishJobStarted_Call {
	_c.Call.Return()
	return _c
}

func (_c *MockPublisher_PublishJobStarted_Call) RunAndReturn(run func(msg *actions.JobStarted)) *MockPublisher_PublishJobStarted_Call {
	_c.Run(run)
	return _c
}

// PublishStatic provides a mock function for the type MockPublisher
func (_mock *MockPublisher) PublishStatic(min int, max int) {
	_mock.Called(min, max)
	return
}

// MockPublisher_PublishStatic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishStatic'
type MockPublisher_PublishStatic_Call struct {
	*mock.Call
}

// PublishStatic is a helper method to define mock.On call
//   - min int
//   - max int
func (_e *MockPublisher_Expecter) PublishStatic(min interface{}, max interface{}) *MockPublisher_PublishStatic_Call {
	return &MockPublisher_PublishStatic_Call{Call: _e.mock.On("PublishStatic", min, max)}
}

func (_c *MockPublisher_PublishStatic_Call) Run(run func(min int, max int)) *MockPublisher_PublishStatic_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		var arg1 int
		if args[1] != nil {
			arg1 = args[1].(int)
		}
		run(
			arg0,
			arg1,
		)
	})
	return _c
}

func (_c *MockPublisher_PublishStatic_Call) Return() *MockPublisher_PublishStatic_Call {
	_c.Call.Return()
	return _c
}

func (_c *MockPublisher_PublishStatic_Call) RunAndReturn(run func(min int, max int)) *MockPublisher_PublishStatic_Call {
	_c.Run(run)
	return _c
}

// PublishStatistics provides a mock function for the type MockPublisher
func (_mock *MockPublisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
	_mock.Called(stats)
	return
}

// MockPublisher_PublishStatistics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishStatistics'
type MockPublisher_PublishStatistics_Call struct {
	*mock.Call
}

// PublishStatistics is a helper method to define mock.On call
//   - stats *actions.RunnerScaleSetStatistic
func (_e *MockPublisher_Expecter) PublishStatistics(stats interface{}) *MockPublisher_PublishStatistics_Call {
	return &MockPublisher_PublishStatistics_Call{Call: _e.mock.On("PublishStatistics", stats)}
}

func (_c *MockPublisher_PublishStatistics_Call) Run(run func(stats *actions.RunnerScaleSetStatistic)) *MockPublisher_PublishStatistics_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *actions.RunnerScaleSetStatistic
		if args[0] != nil {
			arg0 = args[0].(*actions.RunnerScaleSetStatistic)
		}
		run(
			arg0,
		)
	})
	return _c
}

func (_c *MockPublisher_PublishStatistics_Call) Return() *MockPublisher_PublishStatistics_Call {
	_c.Call.Return()
	return _c
}

func (_c *MockPublisher_PublishStatistics_Call) RunAndReturn(run func(stats *actions.RunnerScaleSetStatistic)) *MockPublisher_PublishStatistics_Call {
	_c.Run(run)
	return _c
}

View File

@@ -1,296 +0,0 @@
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package mocks
import (
"context"
"github.com/actions/actions-runner-controller/github/actions"
mock "github.com/stretchr/testify/mock"
)
// NewServerPublisher creates a new instance of ServerPublisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
//
// NOTE(review): this file is mockery-generated ("DO NOT EDIT" header above);
// any comments added here will be lost on the next regeneration.
func NewServerPublisher(t interface {
	mock.TestingT
	Cleanup(func())
}) *ServerPublisher {
	// The local name deliberately shadows the imported mock package (generated pattern).
	mock := &ServerPublisher{}
	mock.Mock.Test(t)
	// Fail the test at cleanup time if any configured expectation was not met.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}

// ServerPublisher is an autogenerated mock type for the ServerExporter type
type ServerPublisher struct {
	mock.Mock
}

// ServerPublisher_Expecter provides the typed EXPECT() builder API for ServerPublisher.
type ServerPublisher_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns an expecter for setting up typed expectations on the mock.
func (_m *ServerPublisher) EXPECT() *ServerPublisher_Expecter {
	return &ServerPublisher_Expecter{mock: &_m.Mock}
}

// ListenAndServe provides a mock function for the type ServerPublisher
func (_mock *ServerPublisher) ListenAndServe(ctx context.Context) error {
	ret := _mock.Called(ctx)
	if len(ret) == 0 {
		panic("no return value specified for ListenAndServe")
	}
	var r0 error
	// Prefer a function-typed return (set via RunAndReturn); otherwise
	// fall back to the plain error value (set via Return).
	if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = returnFunc(ctx)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// ServerPublisher_ListenAndServe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListenAndServe'
type ServerPublisher_ListenAndServe_Call struct {
	*mock.Call
}

// ListenAndServe is a helper method to define mock.On call
// - ctx context.Context
func (_e *ServerPublisher_Expecter) ListenAndServe(ctx interface{}) *ServerPublisher_ListenAndServe_Call {
	return &ServerPublisher_ListenAndServe_Call{Call: _e.mock.On("ListenAndServe", ctx)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *ServerPublisher_ListenAndServe_Call) Run(run func(ctx context.Context)) *ServerPublisher_ListenAndServe_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil-check before the type assertion so an untyped nil argument
		// leaves arg0 as the zero value instead of panicking.
		var arg0 context.Context
		if args[0] != nil {
			arg0 = args[0].(context.Context)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return sets the error the mocked ListenAndServe will return.
func (_c *ServerPublisher_ListenAndServe_Call) Return(err error) *ServerPublisher_ListenAndServe_Call {
	_c.Call.Return(err)
	return _c
}

// RunAndReturn computes the return value by invoking run with the call's arguments.
func (_c *ServerPublisher_ListenAndServe_Call) RunAndReturn(run func(ctx context.Context) error) *ServerPublisher_ListenAndServe_Call {
	_c.Call.Return(run)
	return _c
}

// PublishDesiredRunners provides a mock function for the type ServerPublisher
func (_mock *ServerPublisher) PublishDesiredRunners(count int) {
	_mock.Called(count)
	return
}

// ServerPublisher_PublishDesiredRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishDesiredRunners'
type ServerPublisher_PublishDesiredRunners_Call struct {
	*mock.Call
}

// PublishDesiredRunners is a helper method to define mock.On call
// - count int
func (_e *ServerPublisher_Expecter) PublishDesiredRunners(count interface{}) *ServerPublisher_PublishDesiredRunners_Call {
	return &ServerPublisher_PublishDesiredRunners_Call{Call: _e.mock.On("PublishDesiredRunners", count)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *ServerPublisher_PublishDesiredRunners_Call) Run(run func(count int)) *ServerPublisher_PublishDesiredRunners_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; PublishDesiredRunners has no return values.
func (_c *ServerPublisher_PublishDesiredRunners_Call) Return() *ServerPublisher_PublishDesiredRunners_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *ServerPublisher_PublishDesiredRunners_Call) RunAndReturn(run func(count int)) *ServerPublisher_PublishDesiredRunners_Call {
	_c.Run(run)
	return _c
}

// PublishJobCompleted provides a mock function for the type ServerPublisher
func (_mock *ServerPublisher) PublishJobCompleted(msg *actions.JobCompleted) {
	_mock.Called(msg)
	return
}

// ServerPublisher_PublishJobCompleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishJobCompleted'
type ServerPublisher_PublishJobCompleted_Call struct {
	*mock.Call
}

// PublishJobCompleted is a helper method to define mock.On call
// - msg *actions.JobCompleted
func (_e *ServerPublisher_Expecter) PublishJobCompleted(msg interface{}) *ServerPublisher_PublishJobCompleted_Call {
	return &ServerPublisher_PublishJobCompleted_Call{Call: _e.mock.On("PublishJobCompleted", msg)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *ServerPublisher_PublishJobCompleted_Call) Run(run func(msg *actions.JobCompleted)) *ServerPublisher_PublishJobCompleted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *actions.JobCompleted
		if args[0] != nil {
			arg0 = args[0].(*actions.JobCompleted)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; PublishJobCompleted has no return values.
func (_c *ServerPublisher_PublishJobCompleted_Call) Return() *ServerPublisher_PublishJobCompleted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *ServerPublisher_PublishJobCompleted_Call) RunAndReturn(run func(msg *actions.JobCompleted)) *ServerPublisher_PublishJobCompleted_Call {
	_c.Run(run)
	return _c
}

// PublishJobStarted provides a mock function for the type ServerPublisher
func (_mock *ServerPublisher) PublishJobStarted(msg *actions.JobStarted) {
	_mock.Called(msg)
	return
}

// ServerPublisher_PublishJobStarted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishJobStarted'
type ServerPublisher_PublishJobStarted_Call struct {
	*mock.Call
}

// PublishJobStarted is a helper method to define mock.On call
// - msg *actions.JobStarted
func (_e *ServerPublisher_Expecter) PublishJobStarted(msg interface{}) *ServerPublisher_PublishJobStarted_Call {
	return &ServerPublisher_PublishJobStarted_Call{Call: _e.mock.On("PublishJobStarted", msg)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *ServerPublisher_PublishJobStarted_Call) Run(run func(msg *actions.JobStarted)) *ServerPublisher_PublishJobStarted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *actions.JobStarted
		if args[0] != nil {
			arg0 = args[0].(*actions.JobStarted)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; PublishJobStarted has no return values.
func (_c *ServerPublisher_PublishJobStarted_Call) Return() *ServerPublisher_PublishJobStarted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *ServerPublisher_PublishJobStarted_Call) RunAndReturn(run func(msg *actions.JobStarted)) *ServerPublisher_PublishJobStarted_Call {
	_c.Run(run)
	return _c
}

// PublishStatic provides a mock function for the type ServerPublisher
func (_mock *ServerPublisher) PublishStatic(min int, max int) {
	_mock.Called(min, max)
	return
}

// ServerPublisher_PublishStatic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishStatic'
type ServerPublisher_PublishStatic_Call struct {
	*mock.Call
}

// PublishStatic is a helper method to define mock.On call
// - min int
// - max int
func (_e *ServerPublisher_Expecter) PublishStatic(min interface{}, max interface{}) *ServerPublisher_PublishStatic_Call {
	return &ServerPublisher_PublishStatic_Call{Call: _e.mock.On("PublishStatic", min, max)}
}

// Run registers a callback that receives the call's arguments with their concrete types.
func (_c *ServerPublisher_PublishStatic_Call) Run(run func(min int, max int)) *ServerPublisher_PublishStatic_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		var arg1 int
		if args[1] != nil {
			arg1 = args[1].(int)
		}
		run(
			arg0,
			arg1,
		)
	})
	return _c
}

// Return completes the expectation; PublishStatic has no return values.
func (_c *ServerPublisher_PublishStatic_Call) Return() *ServerPublisher_PublishStatic_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *ServerPublisher_PublishStatic_Call) RunAndReturn(run func(min int, max int)) *ServerPublisher_PublishStatic_Call {
	_c.Run(run)
	return _c
}

// PublishStatistics provides a mock function for the type ServerPublisher
func (_mock *ServerPublisher) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
	_mock.Called(stats)
	return
}

// ServerPublisher_PublishStatistics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PublishStatistics'
type ServerPublisher_PublishStatistics_Call struct {
	*mock.Call
}

// PublishStatistics is a helper method to define mock.On call
// - stats *actions.RunnerScaleSetStatistic
func (_e *ServerPublisher_Expecter) PublishStatistics(stats interface{}) *ServerPublisher_PublishStatistics_Call {
	return &ServerPublisher_PublishStatistics_Call{Call: _e.mock.On("PublishStatistics", stats)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *ServerPublisher_PublishStatistics_Call) Run(run func(stats *actions.RunnerScaleSetStatistic)) *ServerPublisher_PublishStatistics_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *actions.RunnerScaleSetStatistic
		if args[0] != nil {
			arg0 = args[0].(*actions.RunnerScaleSetStatistic)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; PublishStatistics has no return values.
func (_c *ServerPublisher_PublishStatistics_Call) Return() *ServerPublisher_PublishStatistics_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *ServerPublisher_PublishStatistics_Call) RunAndReturn(run func(stats *actions.RunnerScaleSetStatistic)) *ServerPublisher_PublishStatistics_Call {
	_c.Run(run)
	return _c
}

View File

@@ -0,0 +1,529 @@
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package metrics
import (
"context"
"github.com/actions/scaleset"
mock "github.com/stretchr/testify/mock"
)
// NewMockRecorder creates a new instance of MockRecorder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
//
// NOTE(review): this file is mockery-generated ("DO NOT EDIT" header above);
// any comments added here will be lost on the next regeneration.
func NewMockRecorder(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockRecorder {
	// The local name deliberately shadows the imported mock package (generated pattern).
	mock := &MockRecorder{}
	mock.Mock.Test(t)
	// Fail the test at cleanup time if any configured expectation was not met.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}

// MockRecorder is an autogenerated mock type for the Recorder type
type MockRecorder struct {
	mock.Mock
}

// MockRecorder_Expecter provides the typed EXPECT() builder API for MockRecorder.
type MockRecorder_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns an expecter for setting up typed expectations on the mock.
func (_m *MockRecorder) EXPECT() *MockRecorder_Expecter {
	return &MockRecorder_Expecter{mock: &_m.Mock}
}

// RecordDesiredRunners provides a mock function for the type MockRecorder
func (_mock *MockRecorder) RecordDesiredRunners(count int) {
	_mock.Called(count)
	return
}

// MockRecorder_RecordDesiredRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordDesiredRunners'
type MockRecorder_RecordDesiredRunners_Call struct {
	*mock.Call
}

// RecordDesiredRunners is a helper method to define mock.On call
// - count int
func (_e *MockRecorder_Expecter) RecordDesiredRunners(count interface{}) *MockRecorder_RecordDesiredRunners_Call {
	return &MockRecorder_RecordDesiredRunners_Call{Call: _e.mock.On("RecordDesiredRunners", count)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockRecorder_RecordDesiredRunners_Call) Run(run func(count int)) *MockRecorder_RecordDesiredRunners_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil-check before the type assertion so an untyped nil argument
		// leaves arg0 as the zero value instead of panicking.
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordDesiredRunners has no return values.
func (_c *MockRecorder_RecordDesiredRunners_Call) Return() *MockRecorder_RecordDesiredRunners_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockRecorder_RecordDesiredRunners_Call) RunAndReturn(run func(count int)) *MockRecorder_RecordDesiredRunners_Call {
	_c.Run(run)
	return _c
}

// RecordJobCompleted provides a mock function for the type MockRecorder
func (_mock *MockRecorder) RecordJobCompleted(msg *scaleset.JobCompleted) {
	_mock.Called(msg)
	return
}

// MockRecorder_RecordJobCompleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobCompleted'
type MockRecorder_RecordJobCompleted_Call struct {
	*mock.Call
}

// RecordJobCompleted is a helper method to define mock.On call
// - msg *scaleset.JobCompleted
func (_e *MockRecorder_Expecter) RecordJobCompleted(msg interface{}) *MockRecorder_RecordJobCompleted_Call {
	return &MockRecorder_RecordJobCompleted_Call{Call: _e.mock.On("RecordJobCompleted", msg)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockRecorder_RecordJobCompleted_Call) Run(run func(msg *scaleset.JobCompleted)) *MockRecorder_RecordJobCompleted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobCompleted
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobCompleted)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordJobCompleted has no return values.
func (_c *MockRecorder_RecordJobCompleted_Call) Return() *MockRecorder_RecordJobCompleted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockRecorder_RecordJobCompleted_Call) RunAndReturn(run func(msg *scaleset.JobCompleted)) *MockRecorder_RecordJobCompleted_Call {
	_c.Run(run)
	return _c
}

// RecordJobStarted provides a mock function for the type MockRecorder
func (_mock *MockRecorder) RecordJobStarted(msg *scaleset.JobStarted) {
	_mock.Called(msg)
	return
}

// MockRecorder_RecordJobStarted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobStarted'
type MockRecorder_RecordJobStarted_Call struct {
	*mock.Call
}

// RecordJobStarted is a helper method to define mock.On call
// - msg *scaleset.JobStarted
func (_e *MockRecorder_Expecter) RecordJobStarted(msg interface{}) *MockRecorder_RecordJobStarted_Call {
	return &MockRecorder_RecordJobStarted_Call{Call: _e.mock.On("RecordJobStarted", msg)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockRecorder_RecordJobStarted_Call) Run(run func(msg *scaleset.JobStarted)) *MockRecorder_RecordJobStarted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobStarted
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobStarted)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordJobStarted has no return values.
func (_c *MockRecorder_RecordJobStarted_Call) Return() *MockRecorder_RecordJobStarted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockRecorder_RecordJobStarted_Call) RunAndReturn(run func(msg *scaleset.JobStarted)) *MockRecorder_RecordJobStarted_Call {
	_c.Run(run)
	return _c
}

// RecordStatic provides a mock function for the type MockRecorder
func (_mock *MockRecorder) RecordStatic(min int, max int) {
	_mock.Called(min, max)
	return
}

// MockRecorder_RecordStatic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatic'
type MockRecorder_RecordStatic_Call struct {
	*mock.Call
}

// RecordStatic is a helper method to define mock.On call
// - min int
// - max int
func (_e *MockRecorder_Expecter) RecordStatic(min interface{}, max interface{}) *MockRecorder_RecordStatic_Call {
	return &MockRecorder_RecordStatic_Call{Call: _e.mock.On("RecordStatic", min, max)}
}

// Run registers a callback that receives the call's arguments with their concrete types.
func (_c *MockRecorder_RecordStatic_Call) Run(run func(min int, max int)) *MockRecorder_RecordStatic_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		var arg1 int
		if args[1] != nil {
			arg1 = args[1].(int)
		}
		run(
			arg0,
			arg1,
		)
	})
	return _c
}

// Return completes the expectation; RecordStatic has no return values.
func (_c *MockRecorder_RecordStatic_Call) Return() *MockRecorder_RecordStatic_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockRecorder_RecordStatic_Call) RunAndReturn(run func(min int, max int)) *MockRecorder_RecordStatic_Call {
	_c.Run(run)
	return _c
}

// RecordStatistics provides a mock function for the type MockRecorder
func (_mock *MockRecorder) RecordStatistics(stats *scaleset.RunnerScaleSetStatistic) {
	_mock.Called(stats)
	return
}

// MockRecorder_RecordStatistics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatistics'
type MockRecorder_RecordStatistics_Call struct {
	*mock.Call
}

// RecordStatistics is a helper method to define mock.On call
// - stats *scaleset.RunnerScaleSetStatistic
func (_e *MockRecorder_Expecter) RecordStatistics(stats interface{}) *MockRecorder_RecordStatistics_Call {
	return &MockRecorder_RecordStatistics_Call{Call: _e.mock.On("RecordStatistics", stats)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockRecorder_RecordStatistics_Call) Run(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockRecorder_RecordStatistics_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.RunnerScaleSetStatistic
		if args[0] != nil {
			arg0 = args[0].(*scaleset.RunnerScaleSetStatistic)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordStatistics has no return values.
func (_c *MockRecorder_RecordStatistics_Call) Return() *MockRecorder_RecordStatistics_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockRecorder_RecordStatistics_Call) RunAndReturn(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockRecorder_RecordStatistics_Call {
	_c.Run(run)
	return _c
}
// NewMockServerExporter creates a new instance of MockServerExporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
//
// NOTE(review): mockery-generated code; comments added here will be lost on regeneration.
func NewMockServerExporter(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockServerExporter {
	// The local name deliberately shadows the imported mock package (generated pattern).
	mock := &MockServerExporter{}
	mock.Mock.Test(t)
	// Fail the test at cleanup time if any configured expectation was not met.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}

// MockServerExporter is an autogenerated mock type for the ServerExporter type
type MockServerExporter struct {
	mock.Mock
}

// MockServerExporter_Expecter provides the typed EXPECT() builder API for MockServerExporter.
type MockServerExporter_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns an expecter for setting up typed expectations on the mock.
func (_m *MockServerExporter) EXPECT() *MockServerExporter_Expecter {
	return &MockServerExporter_Expecter{mock: &_m.Mock}
}

// ListenAndServe provides a mock function for the type MockServerExporter
func (_mock *MockServerExporter) ListenAndServe(ctx context.Context) error {
	ret := _mock.Called(ctx)
	if len(ret) == 0 {
		panic("no return value specified for ListenAndServe")
	}
	var r0 error
	// Prefer a function-typed return (set via RunAndReturn); otherwise
	// fall back to the plain error value (set via Return).
	if returnFunc, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = returnFunc(ctx)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// MockServerExporter_ListenAndServe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListenAndServe'
type MockServerExporter_ListenAndServe_Call struct {
	*mock.Call
}

// ListenAndServe is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockServerExporter_Expecter) ListenAndServe(ctx interface{}) *MockServerExporter_ListenAndServe_Call {
	return &MockServerExporter_ListenAndServe_Call{Call: _e.mock.On("ListenAndServe", ctx)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockServerExporter_ListenAndServe_Call) Run(run func(ctx context.Context)) *MockServerExporter_ListenAndServe_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// nil-check before the type assertion so an untyped nil argument
		// leaves arg0 as the zero value instead of panicking.
		var arg0 context.Context
		if args[0] != nil {
			arg0 = args[0].(context.Context)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return sets the error the mocked ListenAndServe will return.
func (_c *MockServerExporter_ListenAndServe_Call) Return(err error) *MockServerExporter_ListenAndServe_Call {
	_c.Call.Return(err)
	return _c
}

// RunAndReturn computes the return value by invoking run with the call's arguments.
func (_c *MockServerExporter_ListenAndServe_Call) RunAndReturn(run func(ctx context.Context) error) *MockServerExporter_ListenAndServe_Call {
	_c.Call.Return(run)
	return _c
}

// RecordDesiredRunners provides a mock function for the type MockServerExporter
func (_mock *MockServerExporter) RecordDesiredRunners(count int) {
	_mock.Called(count)
	return
}

// MockServerExporter_RecordDesiredRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordDesiredRunners'
type MockServerExporter_RecordDesiredRunners_Call struct {
	*mock.Call
}

// RecordDesiredRunners is a helper method to define mock.On call
// - count int
func (_e *MockServerExporter_Expecter) RecordDesiredRunners(count interface{}) *MockServerExporter_RecordDesiredRunners_Call {
	return &MockServerExporter_RecordDesiredRunners_Call{Call: _e.mock.On("RecordDesiredRunners", count)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockServerExporter_RecordDesiredRunners_Call) Run(run func(count int)) *MockServerExporter_RecordDesiredRunners_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordDesiredRunners has no return values.
func (_c *MockServerExporter_RecordDesiredRunners_Call) Return() *MockServerExporter_RecordDesiredRunners_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockServerExporter_RecordDesiredRunners_Call) RunAndReturn(run func(count int)) *MockServerExporter_RecordDesiredRunners_Call {
	_c.Run(run)
	return _c
}

// RecordJobCompleted provides a mock function for the type MockServerExporter
func (_mock *MockServerExporter) RecordJobCompleted(msg *scaleset.JobCompleted) {
	_mock.Called(msg)
	return
}

// MockServerExporter_RecordJobCompleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobCompleted'
type MockServerExporter_RecordJobCompleted_Call struct {
	*mock.Call
}

// RecordJobCompleted is a helper method to define mock.On call
// - msg *scaleset.JobCompleted
func (_e *MockServerExporter_Expecter) RecordJobCompleted(msg interface{}) *MockServerExporter_RecordJobCompleted_Call {
	return &MockServerExporter_RecordJobCompleted_Call{Call: _e.mock.On("RecordJobCompleted", msg)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockServerExporter_RecordJobCompleted_Call) Run(run func(msg *scaleset.JobCompleted)) *MockServerExporter_RecordJobCompleted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobCompleted
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobCompleted)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordJobCompleted has no return values.
func (_c *MockServerExporter_RecordJobCompleted_Call) Return() *MockServerExporter_RecordJobCompleted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockServerExporter_RecordJobCompleted_Call) RunAndReturn(run func(msg *scaleset.JobCompleted)) *MockServerExporter_RecordJobCompleted_Call {
	_c.Run(run)
	return _c
}

// RecordJobStarted provides a mock function for the type MockServerExporter
func (_mock *MockServerExporter) RecordJobStarted(msg *scaleset.JobStarted) {
	_mock.Called(msg)
	return
}

// MockServerExporter_RecordJobStarted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordJobStarted'
type MockServerExporter_RecordJobStarted_Call struct {
	*mock.Call
}

// RecordJobStarted is a helper method to define mock.On call
// - msg *scaleset.JobStarted
func (_e *MockServerExporter_Expecter) RecordJobStarted(msg interface{}) *MockServerExporter_RecordJobStarted_Call {
	return &MockServerExporter_RecordJobStarted_Call{Call: _e.mock.On("RecordJobStarted", msg)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockServerExporter_RecordJobStarted_Call) Run(run func(msg *scaleset.JobStarted)) *MockServerExporter_RecordJobStarted_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.JobStarted
		if args[0] != nil {
			arg0 = args[0].(*scaleset.JobStarted)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordJobStarted has no return values.
func (_c *MockServerExporter_RecordJobStarted_Call) Return() *MockServerExporter_RecordJobStarted_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockServerExporter_RecordJobStarted_Call) RunAndReturn(run func(msg *scaleset.JobStarted)) *MockServerExporter_RecordJobStarted_Call {
	_c.Run(run)
	return _c
}

// RecordStatic provides a mock function for the type MockServerExporter
func (_mock *MockServerExporter) RecordStatic(min int, max int) {
	_mock.Called(min, max)
	return
}

// MockServerExporter_RecordStatic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatic'
type MockServerExporter_RecordStatic_Call struct {
	*mock.Call
}

// RecordStatic is a helper method to define mock.On call
// - min int
// - max int
func (_e *MockServerExporter_Expecter) RecordStatic(min interface{}, max interface{}) *MockServerExporter_RecordStatic_Call {
	return &MockServerExporter_RecordStatic_Call{Call: _e.mock.On("RecordStatic", min, max)}
}

// Run registers a callback that receives the call's arguments with their concrete types.
func (_c *MockServerExporter_RecordStatic_Call) Run(run func(min int, max int)) *MockServerExporter_RecordStatic_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 int
		if args[0] != nil {
			arg0 = args[0].(int)
		}
		var arg1 int
		if args[1] != nil {
			arg1 = args[1].(int)
		}
		run(
			arg0,
			arg1,
		)
	})
	return _c
}

// Return completes the expectation; RecordStatic has no return values.
func (_c *MockServerExporter_RecordStatic_Call) Return() *MockServerExporter_RecordStatic_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockServerExporter_RecordStatic_Call) RunAndReturn(run func(min int, max int)) *MockServerExporter_RecordStatic_Call {
	_c.Run(run)
	return _c
}

// RecordStatistics provides a mock function for the type MockServerExporter
func (_mock *MockServerExporter) RecordStatistics(stats *scaleset.RunnerScaleSetStatistic) {
	_mock.Called(stats)
	return
}

// MockServerExporter_RecordStatistics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordStatistics'
type MockServerExporter_RecordStatistics_Call struct {
	*mock.Call
}

// RecordStatistics is a helper method to define mock.On call
// - stats *scaleset.RunnerScaleSetStatistic
func (_e *MockServerExporter_Expecter) RecordStatistics(stats interface{}) *MockServerExporter_RecordStatistics_Call {
	return &MockServerExporter_RecordStatistics_Call{Call: _e.mock.On("RecordStatistics", stats)}
}

// Run registers a callback that receives the call's argument with its concrete type.
func (_c *MockServerExporter_RecordStatistics_Call) Run(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockServerExporter_RecordStatistics_Call {
	_c.Call.Run(func(args mock.Arguments) {
		var arg0 *scaleset.RunnerScaleSetStatistic
		if args[0] != nil {
			arg0 = args[0].(*scaleset.RunnerScaleSetStatistic)
		}
		run(
			arg0,
		)
	})
	return _c
}

// Return completes the expectation; RecordStatistics has no return values.
func (_c *MockServerExporter_RecordStatistics_Call) Return() *MockServerExporter_RecordStatistics_Call {
	_c.Call.Return()
	return _c
}

// RunAndReturn delegates to Run because the mocked method returns nothing.
func (_c *MockServerExporter_RecordStatistics_Call) RunAndReturn(run func(stats *scaleset.RunnerScaleSetStatistic)) *MockServerExporter_RecordStatistics_Call {
	_c.Run(run)
	return _c
}

View File

@@ -1,30 +1,27 @@
package worker
package scaler
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"math"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/actions/scaleset"
"github.com/actions/scaleset/listener"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const workerName = "kubernetesworker"
type Option func(*Scaler)
type Option func(*Worker)
func WithLogger(logger logr.Logger) Option {
return func(w *Worker) {
logger = logger.WithName(workerName)
w.logger = &logger
func WithLogger(logger *slog.Logger) Option {
return func(w *Scaler) {
w.logger = logger
}
}
@@ -35,23 +32,25 @@ type Config struct {
MinRunners int
}
// The Worker's role is to process the messages it receives from the listener.
// The Scaler's role is to process the messages it receives from the listener.
// It then initiates Kubernetes API requests to carry out the necessary actions.
type Worker struct {
clientset *kubernetes.Clientset
config Config
lastPatch int
patchSeq int
logger *logr.Logger
type Scaler struct {
clientset *kubernetes.Clientset
config Config
targetRunners int
patchSeq int
// dirty is set when there are any events handled before the desired count is called.
dirty bool
logger *slog.Logger
}
var _ listener.Handler = (*Worker)(nil)
var _ listener.Scaler = (*Scaler)(nil)
func New(config Config, options ...Option) (*Worker, error) {
w := &Worker{
config: config,
lastPatch: -1,
patchSeq: -1,
func New(config Config, options ...Option) (*Scaler, error) {
w := &Scaler{
config: config,
targetRunners: -1,
patchSeq: -1,
}
conf, err := rest.InClusterConfig()
@@ -77,14 +76,9 @@ func New(config Config, options ...Option) (*Worker, error) {
return w, nil
}
func (w *Worker) applyDefaults() error {
func (w *Scaler) applyDefaults() error {
if w.logger == nil {
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatJSON)
if err != nil {
return fmt.Errorf("NewLogger failed: %w", err)
}
logger = logger.WithName(workerName)
w.logger = &logger
w.logger = slog.New(slog.DiscardHandler)
}
return nil
@@ -95,7 +89,7 @@ func (w *Worker) applyDefaults() error {
// This update marks the ephemeral runner so that the controller would have more context
// about the ephemeral runner that should not be deleted when scaling down.
// It returns an error if there is any issue with updating the job information.
func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
func (w *Scaler) HandleJobStarted(ctx context.Context, jobInfo *scaleset.JobStarted) error {
w.logger.Info("Updating job info for the runner",
"runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName,
@@ -106,6 +100,8 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
"jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestID)
w.dirty = true
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
if err != nil {
return fmt.Errorf("failed to marshal empty ephemeral runner: %w", err)
@@ -158,6 +154,11 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
return nil
}
func (w *Scaler) HandleJobCompleted(ctx context.Context, msg *scaleset.JobCompleted) error {
w.dirty = true
return nil
}
// HandleDesiredRunnerCount handles the desired runner count by scaling the ephemeral runner set.
// The function calculates the target runner count based on the minimum and maximum runner count configuration.
// If the target runner count is the same as the last patched count, it skips patching and returns nil.
@@ -165,8 +166,8 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
// The function then scales the ephemeral runner set by applying the merge patch.
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
patchID := w.setDesiredWorkerState(count, jobsCompleted)
func (w *Scaler) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
patchID := w.setDesiredWorkerState(count)
original, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
@@ -183,13 +184,13 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsComple
patch, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: w.lastPatch,
Replicas: w.targetRunners,
PatchID: patchID,
},
},
)
if err != nil {
w.logger.Error(err, "could not marshal patch ephemeral runner set")
w.logger.Error("could not marshal patch ephemeral runner set", "error", err.Error())
return 0, err
}
@@ -220,30 +221,31 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsComple
"name", w.config.EphemeralRunnerSetName,
"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
)
return w.lastPatch, nil
return w.targetRunners, nil
}
// calculateDesiredState calculates the desired state of the worker based on the desired count and the number of jobs completed.
func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
w.patchSeq++
desiredPatchID := w.patchSeq
func (w *Scaler) setDesiredWorkerState(count int) int {
dirty := w.dirty
w.dirty = false
if count == 0 && jobsCompleted == 0 { // empty batch
targetRunnerCount = max(w.lastPatch, targetRunnerCount)
if targetRunnerCount == w.config.MinRunners {
// We have an empty batch, and the last patch was the min runners.
// Since this is an empty batch, and we are at the min runners, they should all be idle.
// If controller created few more pods on accident (during scale down events),
// this situation allows the controller to scale down to the min runners.
// However, it is important to keep the patch sequence increasing so we don't ignore one batch.
desiredPatchID = 0
}
if w.patchSeq == math.MaxInt32 {
w.patchSeq = 0
}
w.patchSeq++
w.lastPatch = targetRunnerCount
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
oldTargetRunners := w.targetRunners
w.targetRunners = targetRunnerCount
desiredPatchID := w.patchSeq
if !dirty && targetRunnerCount == oldTargetRunners && targetRunnerCount == w.config.MinRunners {
// If there were no events sent, and the target runner count
// is the same as the last patched count, we can force the state.
//
// TODO: consider removing w.config.MinRunners from the equation, as it is not relevant to the decision of whether to patch or not.
desiredPatchID = 0
}
w.logger.Info(
"Calculated target runner count",
@@ -251,8 +253,7 @@ func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int {
"decision", targetRunnerCount,
"min", w.config.MinRunners,
"max", w.config.MaxRunners,
"currentRunnerCount", w.lastPatch,
"jobsCompleted", jobsCompleted,
"currentRunnerCount", w.targetRunners,
)
return desiredPatchID

View File

@@ -1,326 +1,334 @@
package worker
package scaler
import (
"log/slog"
"math"
"testing"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
)
var discardLogger = slog.New(slog.DiscardHandler)
func TestSetDesiredWorkerState_MinMaxDefaults(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 0,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("init calculate with acquired 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, w.lastPatch)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("init calculate with acquired 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 1, w.lastPatch)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("increment patch when job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("increment patch when called with same parameters", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(1, 0)
patchID = w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("calculate desired scale when acquired > 0 and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
w.dirty = true
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the last state when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("adjust when acquired == 0 and completed == 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
w.dirty = true
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.False(t, w.dirty)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 1,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
patchID := w.setDesiredWorkerState(2)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("desired patch is 0 but sequence continues on empty batch and min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 4, w.lastPatch)
assert.Equal(t, 4, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 0,
MaxRunners: 5,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 2, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
patchID := w.setDesiredWorkerState(2)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
patchID := w.setDesiredWorkerState(6)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 5, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
w.setDesiredWorkerState(5, 0)
assert.Equal(t, 5, w.lastPatch)
w.setDesiredWorkerState(5)
assert.False(t, w.dirty)
assert.Equal(t, 5, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count > max and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
patchID := w.setDesiredWorkerState(1)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(6, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(6)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 5, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale back to 0 when count was > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
patchID := w.setDesiredWorkerState(6)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
patchID = w.setDesiredWorkerState(0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.targetRunners)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinMaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
newEmptyWorker := func() *Scaler {
return &Scaler{
config: Config{
MinRunners: 1,
MaxRunners: 3,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
targetRunners: -1,
patchSeq: -1,
logger: discardLogger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
patchID := w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale to min when count == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
patchID := w.setDesiredWorkerState(2)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(4, 0)
patchID := w.setDesiredWorkerState(4)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
patchID := w.setDesiredWorkerState(3)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 3, w.targetRunners)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
w.dirty = true
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
patchID = w.setDesiredWorkerState(0)
assert.False(t, w.dirty)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.targetRunners)
assert.Equal(t, 2, w.patchSeq)
})
}