Initial Commit

Thomas Boop
2022-06-02 15:53:11 -04:00
parent 4c8cc497b3
commit 6159767f90
70 changed files with 30723 additions and 0 deletions

packages/docker/README.md

@@ -0,0 +1,10 @@
# Docker Hooks
## Description
This implementation mirrors the original docker implementation in the [Actions Runner](https://github.com/actions/runner).
Feel free to fork this repository and modify it to customize that implementation.
## Prerequisites
- The `GITHUB_WORKSPACE` environment variable must be set to the GitHub workspace directory. The Actions Runner does this automatically, but it may need to be set manually when testing.
- The Docker CLI is installed on the machine, and the Docker daemon is running.
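## Testing locally
The hook reads a single JSON payload from stdin (see `src/index.ts`) with the fields `command`, `args`, `responseFile` and `state`, and writes its result to the response file. The sketch below shows one way to invoke the prepare-job hook from the repository root; it assumes the package has been compiled to `lib/index.js` via `npm run build` and that the command string `prepare_job` matches the `Command.PrepareJob` value exported by `hooklib`.

```typescript
import { spawn } from 'child_process'
import * as fs from 'fs'

// Payload shape mirrors what src/index.ts reads from stdin.
const payload = {
  command: 'prepare_job', // assumed string value of Command.PrepareJob
  responseFile: '/tmp/prepare-job-output.json', // the hook writes its response here
  args: JSON.parse(fs.readFileSync('examples/prepare-job.json', 'utf8')).args,
  state: null
}

const hook = spawn('node', ['packages/docker/lib/index.js'], {
  env: {
    ...process.env,
    GITHUB_WORKSPACE: '/tmp/github-workspace', // normally set by the runner
    RUNNER_NAME: 'local-test', // used to label containers and networks
    RUNNER_TEMP: '/tmp' // used for temporary docker login config
  },
  stdio: ['pipe', 'inherit', 'inherit']
})

hook.stdin?.write(JSON.stringify(payload))
hook.stdin?.end()
```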


@@ -0,0 +1,13 @@
// eslint-disable-next-line import/no-commonjs
module.exports = {
clearMocks: true,
moduleFileExtensions: ['js', 'ts'],
testEnvironment: 'node',
testMatch: ['**/*-test.ts'],
testRunner: 'jest-circus/runner',
transform: {
'^.+\\.ts$': 'ts-jest'
},
setupFilesAfterEnv: ['./jest.setup.js'],
verbose: true
}


@@ -0,0 +1 @@
jest.setTimeout(90000)

packages/docker/package-lock.json (generated)

File diff suppressed because it is too large.


@@ -0,0 +1,29 @@
{
"name": "dockerhooks",
"version": "0.1.0",
"description": "",
"main": "lib/index.js",
"scripts": {
"test": "jest --runInBand",
"build": "npx tsc && npx ncc build"
},
"author": "",
"license": "MIT",
"dependencies": {
"@actions/core": "^1.6.0",
"@actions/exec": "^1.1.1",
"hooklib": "file:../hooklib",
"uuid": "^8.3.2"
},
"devDependencies": {
"@types/jest": "^27.4.1",
"@types/node": "^17.0.23",
"@typescript-eslint/parser": "^5.18.0",
"@vercel/ncc": "^0.33.4",
"jest": "^27.5.1",
"ts-jest": "^27.1.4",
"ts-node": "^10.7.0",
"tsconfig-paths": "^3.14.1",
"typescript": "^4.6.3"
}
}


@@ -0,0 +1,9 @@
export function getRunnerLabel(): string {
const name = process.env.RUNNER_NAME
if (!name) {
throw new Error(
"'RUNNER_NAME' env is required; please contact your self-hosted runner administrator"
)
}
return Buffer.from(name).toString('hex')
}


@@ -0,0 +1,413 @@
import * as core from '@actions/core'
import * as fs from 'fs'
import {
ContainerInfo,
JobContainerInfo,
RunContainerStepArgs,
ServiceContainerInfo,
StepContainerInfo
} from 'hooklib/lib'
import path from 'path'
import { env } from 'process'
import { v4 as uuidv4 } from 'uuid'
import { runDockerCommand, RunDockerCommandOptions } from '../utils'
import { getRunnerLabel } from './constants'
export async function createContainer(
args: ContainerInfo,
name: string,
network: string
): Promise<ContainerMetadata> {
if (!args.image) {
throw new Error('Image was expected')
}
const dockerArgs: string[] = ['create']
dockerArgs.push(`--label=${getRunnerLabel()}`)
dockerArgs.push(`--network=${network}`)
if ((args as ServiceContainerInfo)?.contextName) {
dockerArgs.push(
`--network-alias=${(args as ServiceContainerInfo)?.contextName}`
)
}
dockerArgs.push('--name', name)
if (args?.portMappings?.length) {
for (const portMapping of args.portMappings) {
dockerArgs.push('-p', portMapping)
}
}
if (args.createOptions) {
dockerArgs.push(...args.createOptions.split(' '))
}
if (args.environmentVariables) {
for (const [key, value] of Object.entries(args.environmentVariables)) {
dockerArgs.push('-e')
// args are passed to docker directly (no shell), so avoid literal quotes
if (!value) {
dockerArgs.push(key)
} else {
dockerArgs.push(`${key}=${value}`)
}
}
}
const mountVolumes = [
...(args.userMountVolumes || []),
...((args as JobContainerInfo | StepContainerInfo).systemMountVolumes || [])
]
for (const mountVolume of mountVolumes) {
dockerArgs.push(
`-v=${mountVolume.sourceVolumePath}:${mountVolume.targetVolumePath}`
)
}
if (args.entryPoint) {
dockerArgs.push(`--entrypoint`)
dockerArgs.push(args.entryPoint)
}
dockerArgs.push(args.image)
if (args.entryPointArgs) {
for (const entryPointArg of args.entryPointArgs) {
dockerArgs.push(entryPointArg)
}
}
const id = (await runDockerCommand(dockerArgs)).trim()
if (!id) {
throw new Error('Could not read id from docker command')
}
const response: ContainerMetadata = { id, image: args.image }
if (network) {
response.network = network
}
response.ports = []
if ((args as ServiceContainerInfo).contextName) {
response['contextName'] = (args as ServiceContainerInfo).contextName
}
return response
}
export async function containerPull(
image: string,
configLocation: string
): Promise<void> {
const dockerArgs: string[] = ['pull']
if (configLocation) {
dockerArgs.push('--config')
dockerArgs.push(configLocation)
}
dockerArgs.push(image)
for (let i = 0; i < 3; i++) {
try {
await runDockerCommand(dockerArgs)
return
} catch {
core.info(`docker pull failed on attempt: ${i + 1}`)
}
}
throw new Error('Exiting docker pull after 3 failed attempts')
}
export async function containerStart(id: string): Promise<void> {
const dockerArgs: string[] = ['start']
dockerArgs.push(`${id}`)
await runDockerCommand(dockerArgs)
}
export async function containerStop(id: string | string[]): Promise<void> {
const dockerArgs: string[] = ['stop']
if (Array.isArray(id)) {
for (const v of id) {
dockerArgs.push(v)
}
} else {
dockerArgs.push(id)
}
await runDockerCommand(dockerArgs)
}
export async function containerRemove(id: string | string[]): Promise<void> {
const dockerArgs: string[] = ['rm']
dockerArgs.push('--force')
if (Array.isArray(id)) {
for (const v of id) {
dockerArgs.push(v)
}
} else {
dockerArgs.push(id)
}
await runDockerCommand(dockerArgs)
}
export async function containerBuild(
args: RunContainerStepArgs,
tag: string
): Promise<void> {
const context = path.dirname(`${env.GITHUB_WORKSPACE}/${args.dockerfile}`)
const dockerArgs: string[] = ['build']
dockerArgs.push('-t', tag)
dockerArgs.push('-f', `${env.GITHUB_WORKSPACE}/${args.dockerfile}`)
dockerArgs.push(context)
// TODO: figure out build working directory
await runDockerCommand(dockerArgs, {
workingDir: args['buildWorkingDirectory']
})
}
export async function containerLogs(id: string): Promise<void> {
const dockerArgs: string[] = ['logs']
dockerArgs.push('--details')
dockerArgs.push(id)
await runDockerCommand(dockerArgs)
}
export async function containerNetworkRemove(network: string): Promise<void> {
const dockerArgs: string[] = ['network']
dockerArgs.push('rm')
dockerArgs.push(network)
await runDockerCommand(dockerArgs)
}
export async function containerPrune(): Promise<void> {
const dockerPSArgs: string[] = [
'ps',
'--all',
'--quiet',
'--no-trunc',
'--filter',
`label=${getRunnerLabel()}`
]
const res = (await runDockerCommand(dockerPSArgs)).trim()
if (res) {
await containerRemove(res.split('\n'))
}
}
async function containerHealthStatus(id: string): Promise<ContainerHealth> {
const dockerArgs = [
'inspect',
'--format="{{if .Config.Healthcheck}}{{print .State.Health.Status}}{{end}}"',
id
]
const result = (await runDockerCommand(dockerArgs)).trim().replace(/"/g, '')
if (
result === ContainerHealth.Healthy ||
result === ContainerHealth.Starting ||
result === ContainerHealth.Unhealthy
) {
return result
}
return ContainerHealth.None
}
export async function healthCheck({
id,
image
}: ContainerMetadata): Promise<void> {
let health = await containerHealthStatus(id)
if (health === ContainerHealth.None) {
core.info(
`Healthcheck is not set for container ${image}, considering it ${ContainerHealth.Healthy}`
)
return
}
let tries = 1
while (health === ContainerHealth.Starting && tries < 13) {
const backOffSeconds = Math.pow(2, tries)
core.info(
`Container '${image}' is '${health}', retry in ${backOffSeconds} seconds`
)
await new Promise(resolve => setTimeout(resolve, 1000 * backOffSeconds))
tries++
health = await containerHealthStatus(id)
}
if (health !== ContainerHealth.Healthy) {
throw new Error(
`Container '${image}' is unhealthy with status '${health}'`
)
}
}
export async function containerPorts(id: string): Promise<string[]> {
const dockerArgs = ['port', id]
const portMappings = (await runDockerCommand(dockerArgs)).trim()
return portMappings.split('\n')
}
export async function registryLogin(args): Promise<string> {
if (!args.registry) {
return ''
}
const credentials = {
username: args.registry.username,
password: args.registry.password
}
const configLocation = `${env.RUNNER_TEMP}/.docker_${uuidv4()}`
fs.mkdirSync(configLocation)
try {
await dockerLogin(configLocation, args.registry.serverUrl, credentials)
} catch (error) {
fs.rmdirSync(configLocation, { recursive: true })
throw error
}
return configLocation
}
export async function registryLogout(configLocation: string): Promise<void> {
if (configLocation) {
await dockerLogout(configLocation)
fs.rmdirSync(configLocation, { recursive: true })
}
}
async function dockerLogin(
configLocation: string,
registry: string,
credentials: { username: string; password: string }
): Promise<void> {
const credentialsArgs =
credentials.username && credentials.password
? ['-u', credentials.username, '--password-stdin']
: []
const dockerArgs = [
'--config',
configLocation,
'login',
...credentialsArgs,
registry
]
const options: RunDockerCommandOptions =
credentials.username && credentials.password
? {
input: Buffer.from(credentials.password, 'utf-8')
}
: {}
await runDockerCommand(dockerArgs, options)
}
async function dockerLogout(configLocation: string): Promise<void> {
const dockerArgs = ['--config', configLocation, 'logout']
await runDockerCommand(dockerArgs)
}
export async function containerExecStep(
args,
containerId: string
): Promise<void> {
const dockerArgs: string[] = ['exec', '-i']
dockerArgs.push(`--workdir=${args.workingDirectory}`)
for (const [key, value] of Object.entries(args['environmentVariables'])) {
dockerArgs.push('-e')
// args are passed to docker directly (no shell), so avoid literal quotes
if (!value) {
dockerArgs.push(key)
} else {
dockerArgs.push(`${key}=${value}`)
}
}
// TODO: figure out prepend path and update it here
// (we need to pass PATH in as -e PATH={fullPath}, where {fullPath} is the prepend path added to the container's current PATH)
dockerArgs.push(containerId)
dockerArgs.push(args.entryPoint)
for (const entryPointArg of args.entryPointArgs) {
dockerArgs.push(entryPointArg)
}
await runDockerCommand(dockerArgs)
}
export async function containerRun(
args: RunContainerStepArgs,
name: string,
network: string
): Promise<void> {
if (!args.image) {
throw new Error('expected image to be set')
}
const dockerArgs: string[] = ['run', '--rm']
dockerArgs.push('--name', name)
dockerArgs.push(`--workdir=${args.workingDirectory}`)
dockerArgs.push(`--label=${getRunnerLabel()}`)
dockerArgs.push(`--network=${network}`)
if (args.createOptions) {
dockerArgs.push(...args.createOptions.split(' '))
}
if (args.environmentVariables) {
for (const [key, value] of Object.entries(args.environmentVariables)) {
// Pass only the variable name; docker reads the value from the hook's environment, so secrets are never printed in the command line
env[key] = value ?? undefined
dockerArgs.push('-e')
dockerArgs.push(key)
}
}
const mountVolumes = [
...(args.userMountVolumes || []),
...(args.systemMountVolumes || [])
]
for (const mountVolume of mountVolumes) {
dockerArgs.push(`-v`)
dockerArgs.push(
`${mountVolume.sourceVolumePath}:${mountVolume.targetVolumePath}${
mountVolume.readOnly ? ':ro' : ''
}`
)
}
if (args['entryPoint']) {
dockerArgs.push(`--entrypoint`)
dockerArgs.push(args['entryPoint'])
}
dockerArgs.push(args.image)
if (args.entryPointArgs) {
for (const entryPointArg of args.entryPointArgs) {
dockerArgs.push(entryPointArg)
}
}
await runDockerCommand(dockerArgs)
}
export async function isContainerAlpine(containerId: string): Promise<boolean> {
const dockerArgs: string[] = [
'exec',
containerId,
'sh',
'-c',
"[ $(cat /etc/*release* | grep -i -e '^ID=*alpine*' -c) != 0 ] || exit 1"
]
try {
await runDockerCommand(dockerArgs)
return true
} catch {
return false
}
}
enum ContainerHealth {
Starting = 'starting',
Healthy = 'healthy',
Unhealthy = 'unhealthy',
None = 'none'
}
export interface ContainerMetadata {
id: string
image: string
network?: string
ports?: string[]
contextName?: string
}


@@ -0,0 +1,2 @@
export * from './container'
export * from './network'


@@ -0,0 +1,26 @@
import { runDockerCommand } from '../utils'
import { getRunnerLabel } from './constants'
export async function networkCreate(networkName): Promise<void> {
const dockerArgs: string[] = ['network', 'create']
dockerArgs.push('--label')
dockerArgs.push(getRunnerLabel())
dockerArgs.push(networkName)
await runDockerCommand(dockerArgs)
}
export async function networkRemove(networkName): Promise<void> {
const dockerArgs: string[] = ['network']
dockerArgs.push('rm')
dockerArgs.push(networkName)
await runDockerCommand(dockerArgs)
}
export async function networkPrune(): Promise<void> {
const dockerArgs: string[] = ['network']
dockerArgs.push('prune')
dockerArgs.push('--force')
dockerArgs.push(`--filter`)
dockerArgs.push(`label=${getRunnerLabel()}`)
await runDockerCommand(dockerArgs)
}


@@ -0,0 +1,21 @@
import {
containerRemove,
containerNetworkRemove
} from '../dockerCommands/container'
// eslint-disable-next-line @typescript-eslint/no-unused-vars
export async function cleanupJob(args, state, responseFile): Promise<void> {
const containerIds: string[] = []
if (state?.container) {
containerIds.push(state.container)
}
if (state?.services?.length) {
// state.services is an array of container ids
containerIds.push(...state.services)
}
if (containerIds.length > 0) {
await containerRemove(containerIds)
}
if (state.network) {
await containerNetworkRemove(state.network)
}
}


@@ -0,0 +1,4 @@
export * from './cleanup-job'
export * from './prepare-job'
export * from './run-script-step'
export * from './run-container-step'


@@ -0,0 +1,205 @@
import * as core from '@actions/core'
import { ContextPorts, PrepareJobArgs, writeToResponseFile } from 'hooklib/lib'
import { exit } from 'process'
import { v4 as uuidv4 } from 'uuid'
import {
ContainerMetadata,
containerPorts,
containerPrune,
containerPull,
containerStart,
createContainer,
healthCheck,
isContainerAlpine,
registryLogin,
registryLogout
} from '../dockerCommands/container'
import { networkCreate, networkPrune } from '../dockerCommands/network'
import { sanitize } from '../utils'
export async function prepareJob(
args: PrepareJobArgs,
responseFile
): Promise<void> {
await containerPrune()
await networkPrune()
const container = args.container
const services = args.services
if (!container?.image && !services?.length) {
core.info('No containers exist, skipping hook invocation')
exit(0)
}
const networkName = generateNetworkName()
// Create network
await networkCreate(networkName)
// Create Job Container
let containerMetadata: ContainerMetadata | undefined = undefined
if (!container?.image) {
core.info('No job container provided, skipping')
} else {
setupContainer(container)
const configLocation = await registryLogin(container.registry)
try {
await containerPull(container.image, configLocation)
} finally {
await registryLogout(configLocation)
}
containerMetadata = await createContainer(
container,
generateContainerName(container.image),
networkName
)
if (!containerMetadata?.id) {
throw new Error('Failed to create container')
}
await containerStart(containerMetadata?.id)
}
// Create Service Containers
const servicesMetadata: ContainerMetadata[] = []
if (!services?.length) {
core.info('No service containers provided, skipping')
} else {
for (const service of services) {
const configLocation = await registryLogin(service.registry)
try {
await containerPull(service.image, configLocation)
} finally {
await registryLogout(configLocation)
}
setupContainer(service)
const response = await createContainer(
service,
generateContainerName(service.image),
networkName
)
servicesMetadata.push(response)
await containerStart(response.id)
}
}
if (
(container && !containerMetadata?.id) ||
(services?.length && servicesMetadata.some(s => !s.id))
) {
throw new Error(
`Not all containers are started correctly ${
containerMetadata?.id
}, ${servicesMetadata.map(e => e.id).join(',')}`
)
}
const isAlpine = containerMetadata?.id
? await isContainerAlpine(containerMetadata.id)
: false
if (containerMetadata?.id) {
containerMetadata.ports = await containerPorts(containerMetadata.id)
}
if (servicesMetadata?.length) {
for (const serviceMetadata of servicesMetadata) {
serviceMetadata.ports = await containerPorts(serviceMetadata.id)
}
}
const healthChecks: Promise<void>[] = containerMetadata
? [healthCheck(containerMetadata)]
: []
for (const service of servicesMetadata) {
healthChecks.push(healthCheck(service))
}
try {
await Promise.all(healthChecks)
core.info('All services are healthy')
} catch (error) {
core.error(`Failed to initialize containers, ${error}`)
throw new Error(`Failed to initialize containers, ${error}`)
}
generateResponseFile(
responseFile,
networkName,
containerMetadata,
servicesMetadata,
isAlpine
)
}
function generateResponseFile(
responseFile: string,
networkName: string,
containerMetadata?: ContainerMetadata,
servicesMetadata?: ContainerMetadata[],
isAlpine = false
): void {
const response = {
state: { network: networkName },
context: {},
isAlpine
}
if (containerMetadata) {
response.state['container'] = containerMetadata.id
const contextMeta = JSON.parse(JSON.stringify(containerMetadata))
if (containerMetadata.ports) {
contextMeta.ports = transformDockerPortsToContextPorts(containerMetadata)
}
response.context['container'] = contextMeta
}
if (servicesMetadata && servicesMetadata.length > 0) {
response.state['services'] = []
response.context['services'] = []
for (const meta of servicesMetadata) {
response.state['services'].push(meta.id)
const contextMeta = JSON.parse(JSON.stringify(meta))
if (contextMeta.ports) {
contextMeta.ports = transformDockerPortsToContextPorts(contextMeta)
}
response.context['services'].push(contextMeta)
}
}
writeToResponseFile(responseFile, JSON.stringify(response))
}
function setupContainer(container): void {
// Keep the container alive so steps can later be exec'd into it
container.entryPointArgs = [`-f`, `/dev/null`]
container.entryPoint = 'tail'
}
function generateNetworkName(): string {
return `github_network_${uuidv4()}`
}
function generateContainerName(container): string {
const randomAlias = uuidv4().replace(/-/g, '')
const randomSuffix = uuidv4().substring(0, 6)
return `${randomAlias}_${sanitize(container.image)}_${randomSuffix}`
}
function transformDockerPortsToContextPorts(
meta: ContainerMetadata
): ContextPorts {
// ex: '80/tcp -> 0.0.0.0:80'
const re = /^(\d+)\/(\w+)? -> (.*):(\d+)$/
const contextPorts: ContextPorts = {}
if (meta.ports) {
for (const port of meta.ports) {
const matches = port.match(re)
if (!matches) {
throw new Error(
'Container ports could not match the regex: "^(\\d+)\\/(\\w+)? -> (.*):(\\d+)$"'
)
}
contextPorts[matches[1]] = matches[matches.length - 1]
}
}
return contextPorts
}


@@ -0,0 +1,39 @@
import {
containerBuild,
registryLogin,
registryLogout,
containerPull,
containerRun
} from '../dockerCommands'
import { v4 as uuidv4 } from 'uuid'
import * as core from '@actions/core'
import { RunContainerStepArgs } from 'hooklib/lib'
import { getRunnerLabel } from '../dockerCommands/constants'
export async function runContainerStep(
args: RunContainerStepArgs,
state
): Promise<void> {
const tag = generateBuildTag() // for docker build
if (!args.image) {
throw new Error('expected an image')
} else {
if (args.dockerfile) {
await containerBuild(args, tag)
args.image = tag
} else {
const configLocation = await registryLogin(args)
try {
await containerPull(args.image, configLocation)
} finally {
await registryLogout(configLocation)
}
}
}
// container will get pruned at the end of the job based on the label, no need to cleanup here
await containerRun(args, tag.split(':')[1], state.network)
}
function generateBuildTag(): string {
return `${getRunnerLabel()}:${uuidv4().substring(0, 6)}`
}


@@ -0,0 +1,9 @@
import { RunScriptStepArgs } from 'hooklib/lib'
import { containerExecStep } from '../dockerCommands'
export async function runScriptStep(
args: RunScriptStepArgs,
state
): Promise<void> {
await containerExecStep(args, state.container)
}


@@ -0,0 +1,48 @@
import * as core from '@actions/core'
import {
Command,
getInputFromStdin,
PrepareJobArgs,
RunContainerStepArgs,
RunScriptStepArgs
} from 'hooklib/lib'
import { exit } from 'process'
import {
cleanupJob,
prepareJob,
runContainerStep,
runScriptStep
} from './hooks'
async function run(): Promise<void> {
const input = await getInputFromStdin()
const args = input['args']
const command = input['command']
const responseFile = input['responseFile']
const state = input['state']
try {
switch (command) {
case Command.PrepareJob:
await prepareJob(args as PrepareJobArgs, responseFile)
return exit(0)
case Command.CleanupJob:
await cleanupJob(null, state, null)
return exit(0)
case Command.RunScriptStep:
await runScriptStep(args as RunScriptStepArgs, state)
return exit(0)
case Command.RunContainerStep:
await runContainerStep(args as RunContainerStepArgs, state)
return exit(0)
default:
throw new Error(`Command not recognized: ${command}`)
}
} catch (error) {
core.error(`${error}`)
exit(1)
}
}
void run()


@@ -0,0 +1,56 @@
/* eslint-disable @typescript-eslint/no-var-requires */
/* eslint-disable @typescript-eslint/no-require-imports */
/* eslint-disable import/no-commonjs */
import * as core from '@actions/core'
// Import this way; otherwise TypeScript reports type errors
const exec = require('@actions/exec')
export interface RunDockerCommandOptions {
workingDir?: string
input?: Buffer
}
export async function runDockerCommand(
args: string[],
options?: RunDockerCommandOptions
): Promise<string> {
const pipes = await exec.getExecOutput('docker', args, options)
if (pipes.exitCode !== 0) {
core.error(`Docker failed with exit code ${pipes.exitCode}`)
return Promise.reject(pipes.stderr)
}
return Promise.resolve(pipes.stdout)
}
export function sanitize(val: string): string {
if (!val || typeof val !== 'string') {
return ''
}
const newNameBuilder: string[] = []
for (let i = 0; i < val.length; i++) {
const char = val.charAt(i)
if (!newNameBuilder.length) {
if (isAlpha(char)) {
newNameBuilder.push(char)
}
} else {
if (isAlpha(char) || isNumeric(char) || char === '_') {
newNameBuilder.push(char)
}
}
}
return newNameBuilder.join('')
}
// isAlpha accepts a single character and checks whether
// that character is [a-zA-Z]
function isAlpha(val: string): boolean {
return (
val.length === 1 &&
((val >= 'a' && val <= 'z') || (val >= 'A' && val <= 'Z'))
)
}
function isNumeric(val: string): boolean {
return val.length === 1 && val >= '0' && val <= '9'
}


@@ -0,0 +1,62 @@
import { prepareJob, cleanupJob } from '../src/hooks'
import { v4 as uuidv4 } from 'uuid'
import * as fs from 'fs'
import * as path from 'path'
import TestSetup from './test-setup'
const prepareJobInputPath = path.resolve(
`${__dirname}/../../../examples/prepare-job.json`
)
const tmpOutputDir = `${__dirname}/${uuidv4()}`
let prepareJobOutputPath: string
let prepareJobData: any
let testSetup: TestSetup
jest.useRealTimers()
describe('cleanup job', () => {
beforeAll(() => {
fs.mkdirSync(tmpOutputDir, { recursive: true })
})
afterAll(() => {
fs.rmSync(tmpOutputDir, { recursive: true })
})
beforeEach(async () => {
const prepareJobRawData = fs.readFileSync(prepareJobInputPath, 'utf8')
prepareJobData = JSON.parse(prepareJobRawData.toString())
prepareJobOutputPath = `${tmpOutputDir}/prepare-job-output-${uuidv4()}.json`
fs.writeFileSync(prepareJobOutputPath, '')
testSetup = new TestSetup()
testSetup.initialize()
prepareJobData.args.container.userMountVolumes = testSetup.userMountVolumes
prepareJobData.args.container.systemMountVolumes =
testSetup.systemMountVolumes
prepareJobData.args.container.workingDirectory = testSetup.workingDirectory
await prepareJob(prepareJobData.args, prepareJobOutputPath)
})
afterEach(() => {
fs.rmSync(prepareJobOutputPath, { force: true })
testSetup.teardown()
})
it('should cleanup successfully', async () => {
const prepareJobOutputContent = fs.readFileSync(
prepareJobOutputPath,
'utf-8'
)
const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
await expect(
cleanupJob(prepareJobData.args, parsedPrepareJobOutput.state, null)
).resolves.not.toThrow()
})
})


@@ -0,0 +1,14 @@
import { containerPull } from '../src/dockerCommands'
jest.useRealTimers()
describe('container pull', () => {
it('should fail', async () => {
const arg = { image: 'doesNotExist' }
await expect(containerPull(arg.image, '')).rejects.toThrow()
})
it('should succeed', async () => {
const arg = { image: 'ubuntu:latest' }
await expect(containerPull(arg.image, '')).resolves.not.toThrow()
})
})


@@ -0,0 +1,117 @@
import {
prepareJob,
cleanupJob,
runScriptStep,
runContainerStep
} from '../src/hooks'
import * as fs from 'fs'
import * as path from 'path'
import { v4 as uuidv4 } from 'uuid'
import TestSetup from './test-setup'
const prepareJobJson = fs.readFileSync(
path.resolve(__dirname + '/../../../examples/prepare-job.json'),
'utf8'
)
const containerStepJson = fs.readFileSync(
path.resolve(__dirname + '/../../../examples/run-container-step.json'),
'utf8'
)
const tmpOutputDir = `${__dirname}/_temp/${uuidv4()}`
let prepareJobData: any
let scriptStepJson: any
let scriptStepData: any
let containerStepData: any
let prepareJobOutputFilePath: string
let testSetup: TestSetup
describe('e2e', () => {
beforeAll(() => {
fs.mkdirSync(tmpOutputDir, { recursive: true })
})
afterAll(() => {
fs.rmSync(tmpOutputDir, { recursive: true })
})
beforeEach(() => {
// init dirs
testSetup = new TestSetup()
testSetup.initialize()
prepareJobData = JSON.parse(prepareJobJson)
prepareJobData.args.container.userMountVolumes = testSetup.userMountVolumes
prepareJobData.args.container.systemMountVolumes =
testSetup.systemMountVolumes
prepareJobData.args.container.workingDirectory = testSetup.workingDirectory
scriptStepJson = fs.readFileSync(
path.resolve(__dirname + '/../../../examples/run-script-step.json'),
'utf8'
)
scriptStepData = JSON.parse(scriptStepJson)
scriptStepData.args.workingDirectory = testSetup.workingDirectory
containerStepData = JSON.parse(containerStepJson)
containerStepData.args.workingDirectory = testSetup.workingDirectory
containerStepData.args.userMountVolumes = testSetup.userMountVolumes
containerStepData.args.systemMountVolumes = testSetup.systemMountVolumes
prepareJobOutputFilePath = `${tmpOutputDir}/prepare-job-output-${uuidv4()}.json`
fs.writeFileSync(prepareJobOutputFilePath, '')
})
afterEach(() => {
fs.rmSync(prepareJobOutputFilePath, { force: true })
testSetup.teardown()
})
it('should prepare job, then run script step, then run container step then cleanup', async () => {
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
let rawState = fs.readFileSync(prepareJobOutputFilePath, 'utf-8')
let resp = JSON.parse(rawState)
await expect(
runScriptStep(scriptStepData.args, resp.state)
).resolves.not.toThrow()
await expect(
runContainerStep(containerStepData.args, resp.state)
).resolves.not.toThrow()
await expect(cleanupJob(resp, resp.state, null)).resolves.not.toThrow()
})
it('should prepare job, then run script step, then run container step with Dockerfile then cleanup', async () => {
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
let rawState = fs.readFileSync(prepareJobOutputFilePath, 'utf-8')
let resp = JSON.parse(rawState)
await expect(
runScriptStep(scriptStepData.args, resp.state)
).resolves.not.toThrow()
const dockerfilePath = `${tmpOutputDir}/Dockerfile`
fs.writeFileSync(
dockerfilePath,
`FROM ubuntu:latest
ENV TEST=test
ENTRYPOINT [ "tail", "-f", "/dev/null" ]
`
)
const containerStepDataCopy = JSON.parse(JSON.stringify(containerStepData))
process.env.GITHUB_WORKSPACE = tmpOutputDir
containerStepDataCopy.args.dockerfile = 'Dockerfile'
containerStepDataCopy.args.context = '.'
console.log(containerStepDataCopy.args)
await expect(
runContainerStep(containerStepDataCopy.args, resp.state)
).resolves.not.toThrow()
await expect(cleanupJob(resp, resp.state, null)).resolves.not.toThrow()
})
})


@@ -0,0 +1,103 @@
import * as fs from 'fs'
import { v4 as uuidv4 } from 'uuid'
import { prepareJob } from '../src/hooks'
import TestSetup from './test-setup'
jest.useRealTimers()
let prepareJobOutputPath: string
let prepareJobData: any
const tmpOutputDir = `${__dirname}/_temp/${uuidv4()}`
const prepareJobInputPath = `${__dirname}/../../../examples/prepare-job.json`
let testSetup: TestSetup
describe('prepare job', () => {
beforeAll(() => {
fs.mkdirSync(tmpOutputDir, { recursive: true })
})
afterAll(() => {
fs.rmSync(tmpOutputDir, { recursive: true })
})
beforeEach(async () => {
testSetup = new TestSetup()
testSetup.initialize()
let prepareJobRawData = fs.readFileSync(prepareJobInputPath, 'utf8')
prepareJobData = JSON.parse(prepareJobRawData.toString())
prepareJobData.args.container.userMountVolumes = testSetup.userMountVolumes
prepareJobData.args.container.systemMountVolumes =
testSetup.systemMountVolumes
prepareJobData.args.container.workingDirectory = testSetup.workingDirectory
prepareJobOutputPath = `${tmpOutputDir}/prepare-job-output-${uuidv4()}.json`
fs.writeFileSync(prepareJobOutputPath, '')
})
afterEach(() => {
testSetup.teardown()
})
it('should not throw', async () => {
await expect(
prepareJob(prepareJobData.args, prepareJobOutputPath)
).resolves.not.toThrow()
expect(() => fs.readFileSync(prepareJobOutputPath, 'utf-8')).not.toThrow()
})
it('should have JSON output written to a file', async () => {
await prepareJob(prepareJobData.args, prepareJobOutputPath)
const prepareJobOutputContent = fs.readFileSync(
prepareJobOutputPath,
'utf-8'
)
expect(() => JSON.parse(prepareJobOutputContent)).not.toThrow()
})
it('should have context written to a file', async () => {
await prepareJob(prepareJobData.args, prepareJobOutputPath)
const prepareJobOutputContent = fs.readFileSync(
prepareJobOutputPath,
'utf-8'
)
const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
expect(parsedPrepareJobOutput.context).toBeDefined()
})
it('should have container ids written to file', async () => {
await prepareJob(prepareJobData.args, prepareJobOutputPath)
const prepareJobOutputContent = fs.readFileSync(
prepareJobOutputPath,
'utf-8'
)
const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
expect(parsedPrepareJobOutput.context.container.id).toBeDefined()
expect(typeof parsedPrepareJobOutput.context.container.id).toBe('string')
expect(parsedPrepareJobOutput.context.container.id).toMatch(/^[0-9a-f]+$/)
})
it('should have ports for context written in form [containerPort]:[hostPort]', async () => {
await prepareJob(prepareJobData.args, prepareJobOutputPath)
const prepareJobOutputContent = fs.readFileSync(
prepareJobOutputPath,
'utf-8'
)
const parsedPrepareJobOutput = JSON.parse(prepareJobOutputContent)
const mainContainerPorts = parsedPrepareJobOutput.context.container.ports
expect(mainContainerPorts['8080']).toBe('80')
const redisService = parsedPrepareJobOutput.context.services.find(
s => s.image === 'redis'
)
const redisServicePorts = redisService.ports
expect(redisServicePorts['80']).toBe('8080')
expect(redisServicePorts['8080']).toBe('8088')
})
})


@@ -0,0 +1,112 @@
import * as fs from 'fs'
import { v4 as uuidv4 } from 'uuid'
import { env } from 'process'
import { Mount } from 'hooklib'
export default class TestSetup {
private testdir: string
private runnerMockDir: string
private runnerMockSubdirs = {
work: '_work',
externals: 'externals',
workTemp: '_work/_temp',
workActions: '_work/_actions',
workTool: '_work/_tool',
githubHome: '_work/_temp/_github_home',
githubWorkflow: '_work/_temp/_github_workflow'
}
private readonly projectName = 'example'
constructor() {
this.testdir = `${__dirname}/_temp/${uuidv4()}`
this.runnerMockDir = `${this.testdir}/runner/_layout`
}
private get allTestDirectories() {
const resp = [this.testdir, this.runnerMockDir]
for (const value of Object.values(this.runnerMockSubdirs)) {
resp.push(`${this.runnerMockDir}/${value}`)
}
resp.push(
`${this.runnerMockDir}/_work/${this.projectName}/${this.projectName}`
)
return resp
}
public initialize(): void {
for (const dir of this.allTestDirectories) {
fs.mkdirSync(dir, { recursive: true })
}
env['RUNNER_NAME'] = 'test'
env[
'RUNNER_TEMP'
] = `${this.runnerMockDir}/${this.runnerMockSubdirs.workTemp}`
}
public teardown(): void {
fs.rmdirSync(this.testdir, { recursive: true })
}
public get userMountVolumes(): Mount[] {
return [
{
sourceVolumePath: 'my_docker_volume',
targetVolumePath: '/volume_mount',
readOnly: false
}
]
}
public get systemMountVolumes(): Mount[] {
return [
{
sourceVolumePath: '/var/run/docker.sock',
targetVolumePath: '/var/run/docker.sock',
readOnly: false
},
{
sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.work}`,
targetVolumePath: '/__w',
readOnly: false
},
{
sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.externals}`,
targetVolumePath: '/__e',
readOnly: true
},
{
sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.workTemp}`,
targetVolumePath: '/__w/_temp',
readOnly: false
},
{
sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.workActions}`,
targetVolumePath: '/__w/_actions',
readOnly: false
},
{
sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.workTool}`,
targetVolumePath: '/__w/_tool',
readOnly: false
},
{
sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.githubHome}`,
targetVolumePath: '/github/home',
readOnly: false
},
{
sourceVolumePath: `${this.runnerMockDir}/${this.runnerMockSubdirs.githubWorkflow}`,
targetVolumePath: '/github/workflow',
readOnly: false
}
]
}
public get workingDirectory(): string {
return `/__w/${this.projectName}/${this.projectName}`
}
}


@@ -0,0 +1,12 @@
import { sanitize } from '../src/utils'
describe('Utilities', () => {
it('should return sanitized image name', () => {
expect(sanitize('ubuntu:latest')).toBe('ubuntulatest')
})
it('should return the same string', () => {
const validStr = 'teststr8_one'
expect(sanitize(validStr)).toBe(validStr)
})
})


@@ -0,0 +1,11 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"baseUrl": "./",
"outDir": "./lib",
"rootDir": "./src"
},
"include": [
"./src"
]
}