Compare commits

..

7 Commits

Author SHA1 Message Date
zarko-a
3f829eef9e Fix event.json not being copied to /github/workflow in kubernetes-novolume mode (#287)
In run-script-step, the _temp directory was being copied to the workflow pod,
but the _github_home and _github_workflow directories were not being moved
from their temporary location to the /github directory structure where they
are expected by GitHub Actions.

This caused event.json to be missing at /github/workflow/event.json, breaking
actions that depend on GITHUB_EVENT_PATH.

The fix adds a setup step that copies _github_home and _github_workflow from
/__w/_temp/ to /github/ after copying the temp directory to the pod, matching
the behavior of run-container-step and prepareJobScript.

Uses cp -r instead of symlinks to avoid symlink validation errors when copying
files back from the pod to the runner.
2025-11-26 11:47:19 +01:00
zarko-a
011ffb284e Fix workingDir permissions issue by creating it within init container (#283)
* Fix workingDir permissions issue by creating it within init container

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* rework init commands

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-26 11:46:47 +01:00
Vincent Van Ouytsel
0951cc73e4 Improve validation checks after copying (#285)
* fix: calculate hash again after failure

The hash from the source is calculated only once. The source hash is
checked against the destination hash, but if the destination hash does not
match, the destination hash is calculated again.

The problem is that if the source hash is incorrect, the check will keep
failing because the source hash is never re-calculated.

Now, in the event that the hashes do not match, the hash of the source
and the destination are calculated again.

* fix: use size instead of block size

Previously the %b parameter was used with stat. This displays the number
of blocks allocated to the file. We noticed that in some cases the block
count of the source and the destination file could be slightly different.
Since the source and target run in different containers, they can have
different block sizes defined. If the block count did not match, the hash
would also not match, even if the file content was exactly the same.

With this change, the block size is no longer used. Instead the actual
size in bytes of the file is listed.
2025-11-24 16:14:02 +01:00
Nikola Jokic
15e808935c Allow non-root container (#264)
* Allow non-root container

* format

* add lint:fix and fix lint errors

* fix tests and volume mounts
2025-11-21 14:44:29 +01:00
vvanouytsel-trendminer
ad9cb43c31 feat: check if required binaries are present (#272)
* feat: check if required binaries are present

Previously the necessary binaries were copied over using the runner
container. This led to issues in case your main container was using the
musl libc implementation.

Instead of copying over any binaries, the initContainer now checks if
the required binaries are present in the main container.

* feat: get rid of the init container

* fix: add _runner_file_commands

* fix: do not fail if _runner_file_commands does not exist

It seems that for container actions this directory does not exist.
2025-11-10 15:01:40 +01:00
zarko-a
2934de33f8 Sort 'find' output before hashing for consistency (#267)
* Sort 'find' output before hashing for consistency across different platforms

* fix style issues
2025-11-04 12:06:36 +01:00
Jiang Long
ea25fd1b3e Change command to remove sudo to fix fs-init init container (#263)
* Change command to copy externals instead of move

* fix: using only mv, remove sudo
2025-10-21 15:47:08 +02:00
8 changed files with 140 additions and 60 deletions

View File

@@ -12,6 +12,7 @@
"format": "prettier --write '**/*.ts'", "format": "prettier --write '**/*.ts'",
"format-check": "prettier --check '**/*.ts'", "format-check": "prettier --check '**/*.ts'",
"lint": "eslint packages/**/*.ts", "lint": "eslint packages/**/*.ts",
"lint:fix": "eslint packages/**/*.ts --fix",
"build-all": "npm run build --prefix packages/hooklib && npm run build --prefix packages/k8s && npm run build --prefix packages/docker" "build-all": "npm run build --prefix packages/hooklib && npm run build --prefix packages/k8s && npm run build --prefix packages/docker"
}, },
"repository": { "repository": {

View File

@@ -41,3 +41,4 @@ rules:
- Container actions will not have access to the services network or job container network - Container actions will not have access to the services network or job container network
- Docker [create options](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idcontaineroptions) are not supported - Docker [create options](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idcontaineroptions) are not supported
- Container actions will have to specify the entrypoint, since the default entrypoint will be overridden to run the commands from the workflow. - Container actions will have to specify the entrypoint, since the default entrypoint will be overridden to run the commands from the workflow.
- Container actions need to have the following binaries in their container image: `sh`, `env`, `tail`.

View File

@@ -104,7 +104,7 @@ export async function runContainerStep(
try { try {
core.debug(`Executing container step script in pod ${podName}`) core.debug(`Executing container step script in pod ${podName}`)
return await execPodStep( return await execPodStep(
['/__e/sh', '-e', containerPath], ['sh', '-e', containerPath],
pod.metadata.name, pod.metadata.name,
JOB_CONTAINER_NAME JOB_CONTAINER_NAME
) )
@@ -133,7 +133,7 @@ function createContainerSpec(
podContainer.name = JOB_CONTAINER_NAME podContainer.name = JOB_CONTAINER_NAME
podContainer.image = container.image podContainer.image = container.image
podContainer.workingDir = '/__w' podContainer.workingDir = '/__w'
podContainer.command = ['/__e/tail'] podContainer.command = ['tail']
podContainer.args = DEFAULT_CONTAINER_ENTRY_POINT_ARGS podContainer.args = DEFAULT_CONTAINER_ENTRY_POINT_ARGS
podContainer.volumeMounts = CONTAINER_VOLUMES podContainer.volumeMounts = CONTAINER_VOLUMES

View File

@@ -6,6 +6,7 @@ import { execCpFromPod, execCpToPod, execPodStep } from '../k8s'
import { writeRunScript, sleep, listDirAllCommand } from '../k8s/utils' import { writeRunScript, sleep, listDirAllCommand } from '../k8s/utils'
import { JOB_CONTAINER_NAME } from './constants' import { JOB_CONTAINER_NAME } from './constants'
import { dirname } from 'path' import { dirname } from 'path'
import * as shlex from 'shlex'
export async function runScriptStep( export async function runScriptStep(
args: RunScriptStepArgs, args: RunScriptStepArgs,
@@ -26,6 +27,23 @@ export async function runScriptStep(
const runnerTemp = `${workdir}/_temp` const runnerTemp = `${workdir}/_temp`
await execCpToPod(state.jobPod, runnerTemp, containerTemp) await execCpToPod(state.jobPod, runnerTemp, containerTemp)
// Copy GitHub directories from temp to /github
const setupCommands = [
'mkdir -p /github',
'cp -r /__w/_temp/_github_home /github/home',
'cp -r /__w/_temp/_github_workflow /github/workflow'
]
try {
await execPodStep(
['sh', '-c', shlex.quote(setupCommands.join(' && '))],
state.jobPod,
JOB_CONTAINER_NAME
)
} catch (err) {
core.debug(`Failed to copy GitHub directories: ${JSON.stringify(err)}`)
}
// Execute the entrypoint script // Execute the entrypoint script
args.entryPoint = 'sh' args.entryPoint = 'sh'
args.entryPointArgs = ['-e', containerPath] args.entryPointArgs = ['-e', containerPath]

View File

@@ -20,8 +20,10 @@ import {
listDirAllCommand, listDirAllCommand,
sleep, sleep,
EXTERNALS_VOLUME_NAME, EXTERNALS_VOLUME_NAME,
GITHUB_VOLUME_NAME GITHUB_VOLUME_NAME,
WORK_VOLUME
} from './utils' } from './utils'
import * as shlex from 'shlex'
const kc = new k8s.KubeConfig() const kc = new k8s.KubeConfig()
@@ -91,13 +93,33 @@ export async function createJobPod(
appPod.spec = new k8s.V1PodSpec() appPod.spec = new k8s.V1PodSpec()
appPod.spec.containers = containers appPod.spec.containers = containers
appPod.spec.securityContext = {
fsGroup: 1001
}
// Extract working directory from GITHUB_WORKSPACE
// GITHUB_WORKSPACE is like /__w/repo-name/repo-name
const githubWorkspace = process.env.GITHUB_WORKSPACE
const workingDirPath = githubWorkspace?.split('/').slice(-2).join('/') ?? ''
const initCommands = [
'mkdir -p /mnt/externals',
'mkdir -p /mnt/work',
'mkdir -p /mnt/github',
'mv /home/runner/externals/* /mnt/externals/'
]
if (workingDirPath) {
initCommands.push(`mkdir -p /mnt/work/${workingDirPath}`)
}
appPod.spec.initContainers = [ appPod.spec.initContainers = [
{ {
name: 'fs-init', name: 'fs-init',
image: image:
process.env.ACTIONS_RUNNER_IMAGE || process.env.ACTIONS_RUNNER_IMAGE ||
'ghcr.io/actions/actions-runner:latest', 'ghcr.io/actions/actions-runner:latest',
command: ['sh', '-c', 'sudo mv /home/runner/externals/* /mnt/externals'], command: ['sh', '-c', initCommands.join(' && ')],
securityContext: { securityContext: {
runAsGroup: 1001, runAsGroup: 1001,
runAsUser: 1001 runAsUser: 1001
@@ -106,6 +128,14 @@ export async function createJobPod(
{ {
name: EXTERNALS_VOLUME_NAME, name: EXTERNALS_VOLUME_NAME,
mountPath: '/mnt/externals' mountPath: '/mnt/externals'
},
{
name: WORK_VOLUME,
mountPath: '/mnt/work'
},
{
name: GITHUB_VOLUME_NAME,
mountPath: '/mnt/github'
} }
] ]
} }
@@ -121,6 +151,10 @@ export async function createJobPod(
{ {
name: GITHUB_VOLUME_NAME, name: GITHUB_VOLUME_NAME,
emptyDir: {} emptyDir: {}
},
{
name: WORK_VOLUME,
emptyDir: {}
} }
] ]
@@ -169,33 +203,6 @@ export async function createContainerStepPod(
appPod.spec = new k8s.V1PodSpec() appPod.spec = new k8s.V1PodSpec()
appPod.spec.containers = [container] appPod.spec.containers = [container]
appPod.spec.initContainers = [
{
name: 'fs-init',
image:
process.env.ACTIONS_RUNNER_IMAGE ||
'ghcr.io/actions/actions-runner:latest',
command: [
'bash',
'-c',
`sudo cp $(which sh) /mnt/externals/sh \
&& sudo cp $(which tail) /mnt/externals/tail \
&& sudo cp $(which env) /mnt/externals/env \
&& sudo chmod -R 777 /mnt/externals`
],
securityContext: {
runAsGroup: 1001,
runAsUser: 1001,
privileged: true
},
volumeMounts: [
{
name: EXTERNALS_VOLUME_NAME,
mountPath: '/mnt/externals'
}
]
}
]
appPod.spec.restartPolicy = 'Never' appPod.spec.restartPolicy = 'Never'
@@ -207,6 +214,10 @@ export async function createContainerStepPod(
{ {
name: GITHUB_VOLUME_NAME, name: GITHUB_VOLUME_NAME,
emptyDir: {} emptyDir: {}
},
{
name: WORK_VOLUME,
emptyDir: {}
} }
] ]
@@ -271,19 +282,18 @@ export async function execPodStep(
}) })
} }
export async function execCalculateOutputHash( export async function execCalculateOutputHashSorted(
podName: string, podName: string,
containerName: string, containerName: string,
command: string[] command: string[]
): Promise<string> { ): Promise<string> {
const exec = new k8s.Exec(kc) const exec = new k8s.Exec(kc)
// Create a writable stream that updates a SHA-256 hash with stdout data let output = ''
const hash = createHash('sha256') const outputWriter = new stream.Writable({
const hashWriter = new stream.Writable({
write(chunk, _enc, cb) { write(chunk, _enc, cb) {
try { try {
hash.update(chunk.toString('utf8') as Buffer) output += chunk.toString('utf8')
cb() cb()
} catch (e) { } catch (e) {
cb(e as Error) cb(e as Error)
@@ -298,7 +308,7 @@ export async function execCalculateOutputHash(
podName, podName,
containerName, containerName,
command, command,
hashWriter, // capture stdout for hashing outputWriter, // capture stdout
process.stderr, process.stderr,
null, null,
false /* tty */, false /* tty */,
@@ -320,27 +330,46 @@ export async function execCalculateOutputHash(
.catch(e => reject(e)) .catch(e => reject(e))
}) })
// finalize hash and return digest outputWriter.end()
hashWriter.end()
// Sort lines for consistent ordering across platforms
const sortedOutput =
output
.split('\n')
.filter(line => line.length > 0)
.sort()
.join('\n') + '\n'
const hash = createHash('sha256')
hash.update(sortedOutput)
return hash.digest('hex') return hash.digest('hex')
} }
export async function localCalculateOutputHash( export async function localCalculateOutputHashSorted(
commands: string[] commands: string[]
): Promise<string> { ): Promise<string> {
return await new Promise<string>((resolve, reject) => { return await new Promise<string>((resolve, reject) => {
const hash = createHash('sha256')
const child = spawn(commands[0], commands.slice(1), { const child = spawn(commands[0], commands.slice(1), {
stdio: ['ignore', 'pipe', 'ignore'] stdio: ['ignore', 'pipe', 'ignore']
}) })
let output = ''
child.stdout.on('data', chunk => { child.stdout.on('data', chunk => {
hash.update(chunk) output += chunk.toString('utf8')
}) })
child.on('error', reject) child.on('error', reject)
child.on('close', (code: number) => { child.on('close', (code: number) => {
if (code === 0) { if (code === 0) {
// Sort lines for consistent ordering across distributions/platforms
const sortedOutput =
output
.split('\n')
.filter(line => line.length > 0)
.sort()
.join('\n') + '\n'
const hash = createHash('sha256')
hash.update(sortedOutput)
resolve(hash.digest('hex')) resolve(hash.digest('hex'))
} else { } else {
reject(new Error(`child process exited with code ${code}`)) reject(new Error(`child process exited with code ${code}`))
@@ -360,7 +389,15 @@ export async function execCpToPod(
while (true) { while (true) {
try { try {
const exec = new k8s.Exec(kc) const exec = new k8s.Exec(kc)
const command = ['tar', 'xf', '-', '-C', containerPath] // Use tar to extract with --no-same-owner to avoid ownership issues.
// Then use find to fix permissions. The -m flag helps but we also need to fix permissions after.
const command = [
'sh',
'-c',
`tar xf - --no-same-owner -C ${shlex.quote(containerPath)} 2>/dev/null; ` +
`find ${shlex.quote(containerPath)} -type f -exec chmod u+rw {} \\; 2>/dev/null; ` +
`find ${shlex.quote(containerPath)} -type d -exec chmod u+rwx {} \\; 2>/dev/null`
]
const readStream = tar.pack(runnerPath) const readStream = tar.pack(runnerPath)
const errStream = new WritableStreamBuffer() const errStream = new WritableStreamBuffer()
await new Promise((resolve, reject) => { await new Promise((resolve, reject) => {
@@ -378,7 +415,7 @@ export async function execCpToPod(
if (errStream.size()) { if (errStream.size()) {
reject( reject(
new Error( new Error(
`Error from cpFromPod - details: \n ${errStream.getContentsAsString()}` `Error from execCpToPod - status: ${status.status}, details: \n ${errStream.getContentsAsString()}`
) )
) )
} }
@@ -400,21 +437,21 @@ export async function execCpToPod(
} }
} }
const want = await localCalculateOutputHash([ let attempts = 15
const delay = 1000
for (let i = 0; i < attempts; i++) {
try {
const want = await localCalculateOutputHashSorted([
'sh', 'sh',
'-c', '-c',
listDirAllCommand(runnerPath) listDirAllCommand(runnerPath)
]) ])
let attempts = 15 const got = await execCalculateOutputHashSorted(
const delay = 1000 podName,
for (let i = 0; i < attempts; i++) { JOB_CONTAINER_NAME,
try { ['sh', '-c', listDirAllCommand(containerPath)]
const got = await execCalculateOutputHash(podName, JOB_CONTAINER_NAME, [ )
'sh',
'-c',
listDirAllCommand(containerPath)
])
if (got !== want) { if (got !== want) {
core.debug( core.debug(
@@ -441,11 +478,6 @@ export async function execCpFromPod(
core.debug( core.debug(
`Copying from pod ${podName} ${containerPath} to ${targetRunnerPath}` `Copying from pod ${podName} ${containerPath} to ${targetRunnerPath}`
) )
const want = await execCalculateOutputHash(podName, JOB_CONTAINER_NAME, [
'sh',
'-c',
listDirAllCommand(containerPath)
])
let attempt = 0 let attempt = 0
while (true) { while (true) {
@@ -506,7 +538,13 @@ export async function execCpFromPod(
const delay = 1000 const delay = 1000
for (let i = 0; i < attempts; i++) { for (let i = 0; i < attempts; i++) {
try { try {
const got = await localCalculateOutputHash([ const want = await execCalculateOutputHashSorted(
podName,
JOB_CONTAINER_NAME,
['sh', '-c', listDirAllCommand(containerPath)]
)
const got = await localCalculateOutputHashSorted([
'sh', 'sh',
'-c', '-c',
listDirAllCommand(targetRunnerPath) listDirAllCommand(targetRunnerPath)

View File

@@ -15,12 +15,17 @@ export const ENV_USE_KUBE_SCHEDULER = 'ACTIONS_RUNNER_USE_KUBE_SCHEDULER'
export const EXTERNALS_VOLUME_NAME = 'externals' export const EXTERNALS_VOLUME_NAME = 'externals'
export const GITHUB_VOLUME_NAME = 'github' export const GITHUB_VOLUME_NAME = 'github'
export const WORK_VOLUME = 'work'
export const CONTAINER_VOLUMES: k8s.V1VolumeMount[] = [ export const CONTAINER_VOLUMES: k8s.V1VolumeMount[] = [
{ {
name: EXTERNALS_VOLUME_NAME, name: EXTERNALS_VOLUME_NAME,
mountPath: '/__e' mountPath: '/__e'
}, },
{
name: WORK_VOLUME,
mountPath: '/__w'
},
{ {
name: GITHUB_VOLUME_NAME, name: GITHUB_VOLUME_NAME,
mountPath: '/github' mountPath: '/github'
@@ -102,7 +107,7 @@ export function writeContainerStepScript(
rm "$0" # remove script after running rm "$0" # remove script after running
mv /__w/_temp/_github_home /github/home && \ mv /__w/_temp/_github_home /github/home && \
mv /__w/_temp/_github_workflow /github/workflow && \ mv /__w/_temp/_github_workflow /github/workflow && \
mv /__w/_temp/_runner_file_commands /github/file_commands && \ mv /__w/_temp/_runner_file_commands /github/file_commands || true && \
mv /__w/${parts.join('/')}/ /github/workspace && \ mv /__w/${parts.join('/')}/ /github/workspace && \
cd /github/workspace && \ cd /github/workspace && \
exec ${environmentPrefix} ${entryPoint} ${ exec ${environmentPrefix} ${entryPoint} ${
@@ -291,5 +296,5 @@ export async function sleep(ms: number): Promise<void> {
} }
export function listDirAllCommand(dir: string): string { export function listDirAllCommand(dir: string): string {
return `cd ${shlex.quote(dir)} && find . -not -path '*/_runner_hook_responses*' -exec stat -c '%b %n' {} \\;` return `cd ${shlex.quote(dir)} && find . -not -path '*/_runner_hook_responses*' -exec stat -c '%s %n' {} \\;`
} }

View File

@@ -26,6 +26,7 @@ describe('e2e', () => {
afterEach(async () => { afterEach(async () => {
await testHelper.cleanup() await testHelper.cleanup()
}) })
it('should prepare job, run script step, run container step then cleanup without errors', async () => { it('should prepare job, run script step, run container step then cleanup without errors', async () => {
await expect( await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath) prepareJob(prepareJobData.args, prepareJobOutputFilePath)

View File

@@ -231,4 +231,20 @@ describe('Prepare job', () => {
expect(() => content.context.services[0].image).not.toThrow() expect(() => content.context.services[0].image).not.toThrow()
} }
) )
it('should prepare job with container with non-root user', async () => {
prepareJobData.args!.container!.image =
'ghcr.io/actions/actions-runner:latest' // known to use user 1001
await expect(
prepareJob(prepareJobData.args, prepareJobOutputFilePath)
).resolves.not.toThrow()
const content = JSON.parse(
fs.readFileSync(prepareJobOutputFilePath).toString()
)
expect(content.state.jobPod).toBeTruthy()
expect(content.context.container.image).toBe(
'ghcr.io/actions/actions-runner:latest'
)
})
}) })