Compare commits


14 commits

Author SHA1 Message Date
earl-warren
81e85e7e74 Merge pull request 'upgrade to act 1.5.1' (#29) from earl-warren/runner:wip-act into main
Reviewed-on: https://code.forgejo.org/forgejo/runner/pulls/29
Reviewed-by: dachary <dachary@noreply.code.forgejo.org>
2023-05-21 23:23:18 +00:00
Earl Warren
5b3db016db
upgrade to act 1.5.1 2023-05-22 01:12:40 +02:00
earl-warren
3164ec1205 Merge pull request 'upgrade to act 1.5.0' (#28) from earl-warren/runner:wip-act into main
Reviewed-on: https://code.forgejo.org/forgejo/runner/pulls/28
Reviewed-by: dachary <dachary@noreply.code.forgejo.org>
2023-05-21 18:31:43 +00:00
Earl Warren
0bd5dc47c6
upgrade to act 1.5.0 2023-05-21 20:27:40 +02:00
earl-warren
c0780f9de5 Merge pull request 'docker/build-push-action@v4: Invalid token specified' (#27) from earl-warren/runner:wip-build-push into main
Reviewed-on: https://code.forgejo.org/forgejo/runner/pulls/27
Reviewed-by: dachary <dachary@noreply.code.forgejo.org>
2023-04-30 22:07:58 +00:00
Earl Warren
84d878751c
docker/build-push-action@v4: Invalid token specified
Cannot read properties of undefined (reading 'replace')
2023-05-01 00:04:21 +02:00
earl-warren
3a0a947017 Merge pull request '[FORGEJO] secrets are trimmed from output, cope with it' (#25) from earl-warren/runner:wip-secrets into main
Reviewed-on: https://code.forgejo.org/forgejo/runner/pulls/25
Reviewed-by: dachary <dachary@noreply.code.forgejo.org>
2023-04-30 21:43:24 +00:00
Earl Warren
6f8ca39fa4
[FORGEJO] secrets are trimmed from output, cope with it 2023-04-30 23:32:40 +02:00
Earl Warren
c9ea086e40
[FORGEJO] workflows 2023-04-30 18:26:17 +02:00
Earl Warren
84a6729f79
[FORGEJO] look for workflows in the .forgejo/workflows directory 2023-04-30 18:14:44 +02:00
Earl Warren
28e99cc668
[FORGEJO] GITHUB_SERVER_URL is always the runner registration addr 2023-04-30 18:14:06 +02:00
Earl Warren
c751347d91
[FORGEJO] include ACT at the desired version 2023-04-30 18:13:38 +02:00
Earl Warren
02b72efa6f
[FORGEJO] build forgejo-runner 2023-04-30 18:13:11 +02:00
Earl Warren
b02cc3071e
[FORGEJO] delete files conflicting with Forgejo 2023-04-30 18:11:35 +02:00
71 changed files with 2516 additions and 3157 deletions


@ -1,16 +0,0 @@
root = true
[*]
indent_style = space
indent_size = 2
tab_width = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.{go}]
indent_style = tab
[Makefile]
indent_style = tab


@ -1,16 +0,0 @@
#!/bin/bash
set -ex
setup_forgejo=$1
setup_forgejo_pr=$2
runner=$3
runner_pr=$4
url=$(jq --raw-output .head.repo.html_url < $runner_pr)
test "$url" != null
branch=$(jq --raw-output .head.ref < $runner_pr)
test "$branch" != null
cd $setup_forgejo
./utils/upgrade-runner.sh $url @$branch
date > last-upgrade


@ -1,24 +0,0 @@
import json
expectedLabels = {
"maintainer": "contact@forgejo.org",
"org.opencontainers.image.authors": "Forgejo",
"org.opencontainers.image.url": "https://forgejo.org",
"org.opencontainers.image.documentation": "https://forgejo.org/docs/latest/admin/actions/#forgejo-runner",
"org.opencontainers.image.source": "https://code.forgejo.org/forgejo/runner",
"org.opencontainers.image.version": "1.2.3",
"org.opencontainers.image.vendor": "Forgejo",
"org.opencontainers.image.licenses": "MIT",
"org.opencontainers.image.title": "Forgejo Runner",
"org.opencontainers.image.description": "A runner for Forgejo Actions.",
}
inspect = None
with open("./labels.json", "r") as f:
inspect = json.load(f)
assert inspect
labels = inspect[0]["Config"]["Labels"]
for k, v in expectedLabels.items():
assert k in labels, f"'{k}' is missing from labels"
assert labels[k] == v, f"expected {v} in key {k}, found {labels[k]}"


@ -1,11 +0,0 @@
---
on: push
jobs:
ipv6:
runs-on: docker
container:
image: code.forgejo.org/oci/debian:bookworm
steps:
- run: |
apt update -qq ; apt --quiet install -qq --yes iputils-ping
ping -c 1 -6 ::1


@ -1,90 +0,0 @@
name: Integration tests for the release process
on:
push:
paths:
- go.mod
- Dockerfile
- .forgejo/workflows/build-release.yml
- .forgejo/workflows/build-release-integration.yml
pull_request:
paths:
- go.mod
- Dockerfile
- .forgejo/workflows/build-release.yml
- .forgejo/workflows/build-release-integration.yml
jobs:
release-simulation:
runs-on: self-hosted
if: github.repository_owner != 'forgejo-integration' && github.repository_owner != 'forgejo-release'
steps:
- uses: actions/checkout@v3
- id: forgejo
uses: https://code.forgejo.org/actions/setup-forgejo@v1
with:
user: root
password: admin1234
image-version: 1.20
lxc-ip-prefix: 10.0.9
- name: publish
run: |
set -x
version=1.2.3
cat > /etc/docker/daemon.json <<EOF
{
"insecure-registries" : ["${{ steps.forgejo.outputs.host-port }}"]
}
EOF
systemctl restart docker
dir=$(mktemp -d)
trap "rm -fr $dir" EXIT
url=http://root:admin1234@${{ steps.forgejo.outputs.host-port }}
export FORGEJO_RUNNER_LOGS="${{ steps.forgejo.outputs.runner-logs }}"
#
# Create a new project with the runner and the release workflow only
#
rsync -a --exclude .git ./ $dir/
rm $(find $dir/.forgejo/workflows/*.yml | grep -v build-release.yml)
forgejo-test-helper.sh push $dir $url root runner
sha=$(forgejo-test-helper.sh branch_tip $url root/runner main)
#
# Push a tag to trigger the release workflow and wait for it to complete
#
forgejo-curl.sh api_json --data-raw '{"tag_name": "v'$version'", "target": "'$sha'"}' $url/api/v1/repos/root/runner/tags
LOOPS=180 forgejo-test-helper.sh wait_success "$url" root/runner $sha
#
# uncomment to see the logs even when everything is reported to be working ok
#
#cat $FORGEJO_RUNNER_LOGS
#
# Minimal sanity checks. e2e test is for the setup-forgejo action
#
for arch in amd64 arm64 ; do
binary=forgejo-runner-$version-linux-$arch
for suffix in '' '.xz' ; do
curl --fail -L -sS $url/root/runner/releases/download/v$version/$binary$suffix > $binary$suffix
if test "$suffix" = .xz ; then
unxz --keep $binary$suffix
fi
chmod +x $binary
./$binary --version | grep $version
curl --fail -L -sS $url/root/runner/releases/download/v$version/$binary$suffix.sha256 > $binary$suffix.sha256
shasum -a 256 --check $binary$suffix.sha256
rm $binary$suffix
done
done
docker pull ${{ steps.forgejo.outputs.host-port }}/root/runner:$version
docker inspect ${{ steps.forgejo.outputs.host-port}}/root/runner:$version > labels.json
python3 .forgejo/labelscompare.py


@ -1,103 +0,0 @@
# SPDX-License-Identifier: MIT
#
# https://code.forgejo.org/forgejo/runner
#
# Build the runner binaries and OCI images
#
# ROLE: forgejo-integration
# DOER: forgejo-ci
# TOKEN: <generated from https://code.forgejo.org/forgejo-ci>
#
name: Build release
on:
push:
tags: 'v*'
jobs:
release:
runs-on: self-hosted
# root is used for testing, allow it
if: secrets.ROLE == 'forgejo-integration' || github.repository_owner == 'root'
steps:
- uses: actions/checkout@v3
- name: Increase the verbosity when there are no secrets
id: verbose
run: |
if test -z "${{ secrets.TOKEN }}"; then
value=true
else
value=false
fi
echo "value=$value" >> "$GITHUB_OUTPUT"
- name: Sanitize the name of the repository
id: repository
run: |
echo "value=${GITHUB_REPOSITORY##*/}" >> "$GITHUB_OUTPUT"
- name: create test TOKEN
id: token
if: ${{ secrets.TOKEN == '' }}
run: |
apt-get -qq install -y jq
url="${{ env.GITHUB_SERVER_URL }}"
hostport=${url##http*://}
hostport=${hostport%%/}
doer=root
api=http://$doer:admin1234@$hostport/api/v1/users/$doer/tokens
curl -sS -X DELETE $api/release
token=$(curl -sS -X POST -H 'Content-Type: application/json' --data-raw '{"name": "release", "scopes": ["all"]}' $api | jq --raw-output .sha1)
echo "value=${token}" >> "$GITHUB_OUTPUT"
- name: version from ref_name
id: tag-version
run: |
version=${GITHUB_REF_NAME##*v}
echo "value=$version" >> "$GITHUB_OUTPUT"
- name: release notes
id: release-notes
run: |
anchor=${{ steps.tag-version.outputs.value }}
anchor=${anchor//./-}
cat >> "$GITHUB_OUTPUT" <<EOF
value<<ENDVAR
See https://code.forgejo.org/forgejo/runner/src/branch/main/RELEASE-NOTES.md#$anchor
ENDVAR
EOF
- name: build without TOKEN
if: ${{ secrets.TOKEN == '' }}
uses: https://code.forgejo.org/forgejo/forgejo-build-publish/build@v5
with:
forgejo: "${{ env.GITHUB_SERVER_URL }}"
owner: "${{ env.GITHUB_REPOSITORY_OWNER }}"
repository: "${{ steps.repository.outputs.value }}"
doer: root
sha: "${{ github.sha }}"
release-version: "${{ steps.tag-version.outputs.value }}"
token: ${{ steps.token.outputs.value }}
platforms: linux/amd64,linux/arm64
release-notes: "${{ steps.release-notes.outputs.value }}"
binary-name: forgejo-runner
binary-path: /bin/forgejo-runner
verbose: ${{ steps.verbose.outputs.value }}
- name: build with TOKEN
if: ${{ secrets.TOKEN != '' }}
uses: https://code.forgejo.org/forgejo/forgejo-build-publish/build@v5
with:
forgejo: "${{ env.GITHUB_SERVER_URL }}"
owner: "${{ env.GITHUB_REPOSITORY_OWNER }}"
repository: "${{ steps.repository.outputs.value }}"
doer: "${{ secrets.DOER }}"
sha: "${{ github.sha }}"
release-version: "${{ steps.tag-version.outputs.value }}"
token: "${{ secrets.TOKEN }}"
platforms: linux/amd64,linux/arm64
release-notes: "${{ steps.release-notes.outputs.value }}"
binary-name: forgejo-runner
binary-path: /bin/forgejo-runner
verbose: ${{ steps.verbose.outputs.value }}


@ -1,25 +0,0 @@
# SPDX-License-Identifier: MIT
on:
pull_request_target:
types:
- opened
- synchronize
- closed
jobs:
cascade:
runs-on: docker
if: vars.CASCADE != 'no'
steps:
- uses: actions/cascading-pr@v1
with:
origin-url: ${{ env.GITHUB_SERVER_URL }}
origin-repo: forgejo/runner
origin-token: ${{ secrets.CASCADING_PR_ORIGIN }}
origin-pr: ${{ github.event.pull_request.number }}
destination-url: ${{ env.GITHUB_SERVER_URL }}
destination-repo: actions/setup-forgejo
destination-fork-repo: cascading-pr/setup-forgejo
destination-branch: main
destination-token: ${{ secrets.CASCADING_PR_DESTINATION }}
close-merge: true
update: .forgejo/cascading-pr-setup-forgejo


@ -1,70 +0,0 @@
# SPDX-License-Identifier: MIT
on:
push:
branches:
- 'main'
pull_request:
jobs:
example-docker-compose:
runs-on: self-hosted
steps:
- uses: actions/checkout@v4
- name: Install docker
run: |
apt-get update -qq
export DEBIAN_FRONTEND=noninteractive
apt-get install -qq -y ca-certificates curl gnupg
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update -qq
apt-get install -qq -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin=2.20.2-1~debian.11~bullseye
docker version
#
# docker compose is prone to non backward compatible changes, pin it
#
apt-get install -qq -y docker-compose-plugin=2.20.2-1~debian.11~bullseye
docker compose version
- name: run the example
run: |
set -x
cd examples/docker-compose
secret=$(openssl rand -hex 20)
sed -i -e "s/{SHARED_SECRET}/$secret/" compose-forgejo-and-runner.yml
cli="docker compose --progress quiet -f compose-forgejo-and-runner.yml"
#
# Launch Forgejo & the runner
#
$cli up -d
for delay in $(seq 60) ; do test -f /srv/runner-data/.runner && break ; sleep 30 ; done
test -f /srv/runner-data/.runner
#
# Run the demo workflow
#
cli="$cli -f compose-demo-workflow.yml"
$cli up -d demo-workflow
#
# Wait for the demo workflow to complete
#
success='DEMO WORKFLOW SUCCESS'
failure='DEMO WORKFLOW FAILURE'
for delay in $(seq 60) ; do
$cli logs demo-workflow > /tmp/out
grep --quiet "$success" /tmp/out && break
grep --quiet "$failure" /tmp/out && break
$cli ps --all
$cli logs --tail=20 runner-daemon demo-workflow
sleep 30
done
grep --quiet "$success" /tmp/out
$cli logs runner-daemon > /tmp/runner.log
grep --quiet 'Start image=code.forgejo.org/oci/node:20-bookworm' /tmp/runner.log
- name: full docker compose logs
if: always()
run: |
cd examples/docker-compose
docker compose -f compose-forgejo-and-runner.yml -f compose-demo-workflow.yml logs


@ -0,0 +1,55 @@
name: Integration tests for the release process
on:
push:
paths:
- go.mod
- .forgejo/workflows/release.yml
- .forgejo/workflows/integration.yml
jobs:
release-simulation:
runs-on: self-hosted
steps:
- uses: actions/checkout@v3
- id: forgejo
uses: https://code.forgejo.org/actions/setup-forgejo@v1
with:
user: root
password: admin1234
image-version: 1.19
lxc-ip-prefix: 10.0.9
- name: publish the runner release
run: |
set -x
dir=$(mktemp -d)
trap "rm -fr $dir" EXIT
url=http://root:admin1234@${{ steps.forgejo.outputs.host-port }}
export FORGEJO_RUNNER_LOGS="${{ steps.forgejo.outputs.runner-logs }}"
#
# Create a new project with the runner and the release workflow only
#
rsync -a --exclude .git ./ $dir/
rm $(find $dir/.forgejo/workflows/*.yml | grep -v release.yml)
forgejo-test-helper.sh push $dir $url root runner |& tee $dir/pushed
eval $(grep '^sha=' < $dir/pushed)
#
# Push a tag to trigger the release workflow and wait for it to complete
#
forgejo-test-helper.sh api POST $url repos/root/runner/tags ${{ steps.forgejo.outputs.token }} --data-raw '{"tag_name": "v1.2.3", "target": "'$sha'"}'
LOOPS=180 forgejo-test-helper.sh wait_success "$url" root/runner $sha
#
# Minimal sanity checks. e2e test is for the setup-forgejo action
# and the infrastructure playbook.
#
curl -L -sS $url/root/runner/releases/download/v1.2.3/forgejo-runner-amd64 > forgejo-runner
chmod +x forgejo-runner
./forgejo-runner --version | grep 1.2.3


@ -1,42 +0,0 @@
# SPDX-License-Identifier: MIT
#
# https://forgejo.octopuce.forgejo.org/forgejo-release/runner
#
# Copies & sign a release from code.forgejo.org/forgejo-integration/runner to code.forgejo.org/forgejo/runner
#
# ROLE: forgejo-release
# FORGEJO: https://code.forgejo.org
# FROM_OWNER: forgejo-integration
# TO_OWNER: forgejo
# DOER: release-team
# TOKEN: <generated from codeberg.org/release-team>
# GPG_PRIVATE_KEY: <XYZ>
# GPG_PASSPHRASE: <ABC>
#
name: publish
on:
push:
tags: 'v*'
jobs:
publish:
runs-on: self-hosted
if: secrets.DOER != '' && secrets.FORGEJO != '' && secrets.TO_OWNER != '' && secrets.FROM_OWNER != '' && secrets.TOKEN != ''
steps:
- uses: actions/checkout@v3
- name: copy & sign
uses: https://code.forgejo.org/forgejo/forgejo-build-publish/publish@v1
with:
forgejo: ${{ secrets.FORGEJO }}
from-owner: ${{ secrets.FROM_OWNER }}
to-owner: ${{ secrets.TO_OWNER }}
repo: "runner"
ref-name: ${{ github.ref_name }}
container-suffixes: " "
doer: ${{ secrets.DOER }}
token: ${{ secrets.TOKEN }}
gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }}
gpg-passphrase: ${{ secrets.GPG_PASSPHRASE }}
verbose: ${{ secrets.VERBOSE }}


@ -0,0 +1,131 @@
name: Publish release
on:
push:
tags: 'v*'
jobs:
release:
runs-on: self-hosted
steps:
- uses: actions/checkout@v3
- id: verbose
run: |
# if there are no secrets, be verbose
if test -z "${{ secrets.TOKEN }}"; then
value=true
else
value=false
fi
echo "value=$value" >> "$GITHUB_OUTPUT"
echo "shell=set -x" >> "$GITHUB_OUTPUT"
- id: registry
run: |
${{ steps.verbose.outputs.shell }}
url="${{ env.GITHUB_SERVER_URL }}"
hostport=${url##http*://}
hostport=${hostport%%/}
echo "host-port=${hostport}" >> "$GITHUB_OUTPUT"
if ! [[ $url =~ ^http:// ]] ; then
exit 0
fi
cat >> "$GITHUB_OUTPUT" <<EOF
insecure=true
buildx-config<<ENDVAR
[registry."${hostport}"]
http = true
ENDVAR
EOF
- id: secrets
run: |
token="${{ secrets.TOKEN }}"
doer="${{ secrets.DOER }}"
if test -z "$token"; then
apt-get -qq install -y jq
doer=root
api=http://$doer:admin1234@${{ steps.registry.outputs.host-port }}/api/v1/users/$doer/tokens
curl -sS -X DELETE $api/release
token=$(curl -sS -X POST -H 'Content-Type: application/json' --data-raw '{"name": "release", "scopes": ["all"]}' $api | jq --raw-output .sha1)
fi
echo "token=${token}" >> "$GITHUB_OUTPUT"
echo "doer=${doer}" >> "$GITHUB_OUTPUT"
- name: allow docker pull/push to forgejo
if: ${{ steps.registry.outputs.insecure }}
run: |-
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"insecure-registries" : ["${{ steps.registry.outputs.host-port }}"],
"bip": "172.26.0.1/16"
}
EOF
- run: |
echo deb http://deb.debian.org/debian bullseye-backports main | tee /etc/apt/sources.list.d/backports.list && apt-get -qq update
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -qq -y -t bullseye-backports docker.io
- uses: https://github.com/docker/setup-buildx-action@v2
with:
config-inline: |
${{ steps.registry.outputs.buildx-config }}
- run: |
token="${{ steps.secrets.outputs.token }}" ; test -z "$token" && token="${{ secrets.TOKEN }}"
doer="${{ steps.secrets.outputs.doer }}" ; test -z "$doer" && doer="${{ secrets.DOER }}"
BASE64_AUTH=`echo -n "$doer:$token" | base64`
mkdir -p ~/.docker
echo "{\"auths\": {\"$CI_REGISTRY\": {\"auth\": \"$BASE64_AUTH\"}}}" > ~/.docker/config.json
env:
CI_REGISTRY: "${{ env.GITHUB_SERVER_URL }}${{ env.GITHUB_REPOSITORY_OWNER }}"
- id: build
run: |
${{ steps.verbose.outputs.shell }}
tag="${{ github.ref_name }}"
tag=${tag##*v}
echo "tag=$tag" >> "$GITHUB_OUTPUT"
echo "image=${{ steps.registry.outputs.host-port }}/${{ github.repository }}:${tag}" >> "$GITHUB_OUTPUT"
- uses: https://github.com/docker/build-push-action@v4
# workaround until https://github.com/docker/build-push-action/commit/d8823bfaed2a82c6f5d4799a2f8e86173c461aba is in @v4 or @v5 is released
env:
ACTIONS_RUNTIME_TOKEN: ''
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
tags: ${{ steps.build.outputs.image }}
- run: |
${{ steps.verbose.outputs.shell }}
mkdir -p release
for arch in amd64 arm64; do
docker create --platform linux/$arch --name runner ${{ steps.build.outputs.image }}
docker cp runner:/bin/forgejo-runner release/forgejo-runner-$arch
shasum -a 256 < release/forgejo-runner-$arch > release/forgejo-runner-$arch.sha256
docker rm runner
done
- name: publish release (when TOKEN secret is NOT set)
if: ${{ secrets.TOKEN == '' }}
uses: https://code.forgejo.org/actions/forgejo-release@v1
with:
direction: upload
release-dir: release
release-notes: "RELEASE-NOTES#${{ steps.build.outputs.tag }}"
token: ${{ steps.secrets.outputs.token }}
verbose: ${{ steps.verbose.outputs.value }}
- name: publish release (when TOKEN secret is set)
if: ${{ secrets.TOKEN != '' }}
uses: https://code.forgejo.org/actions/forgejo-release@v1
with:
direction: upload
release-dir: release
release-notes: "RELEASE-NOTES#${{ steps.build.outputs.tag }}"
token: ${{ secrets.TOKEN }}
verbose: ${{ steps.verbose.outputs.value }}


@ -1,108 +1,23 @@
name: checks
on:
push:
branches:
- 'main'
pull_request:
on:
- pull_request
- push
env:
FORGEJO_HOST_PORT: 'forgejo:3000'
FORGEJO_ADMIN_USER: 'root'
FORGEJO_ADMIN_PASSWORD: 'admin1234'
FORGEJO_RUNNER_SECRET: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
FORGEJO_SCRIPT: |
/bin/s6-svscan /etc/s6 & sleep 10 ; su -c "forgejo admin user create --admin --username $FORGEJO_ADMIN_USER --password $FORGEJO_ADMIN_PASSWORD --email root@example.com" git && su -c "forgejo forgejo-cli actions register --labels docker --name therunner --secret $FORGEJO_RUNNER_SECRET" git && sleep infinity
GOPROXY: https://goproxy.io,direct
jobs:
build-and-tests:
name: build and test
if: github.repository_owner != 'forgejo-integration' && github.repository_owner != 'forgejo-experimental' && github.repository_owner != 'forgejo-release'
runs-on: docker
services:
forgejo:
image: codeberg.org/forgejo/forgejo:1.21
env:
FORGEJO__security__INSTALL_LOCK: "true"
FORGEJO__log__LEVEL: "debug"
FORGEJO__actions__ENABLED: "true"
FORGEJO_ADMIN_USER: ${{ env.FORGEJO_ADMIN_USER }}
FORGEJO_ADMIN_PASSWORD: ${{ env.FORGEJO_ADMIN_PASSWORD }}
FORGEJO_RUNNER_SECRET: ${{ env.FORGEJO_RUNNER_SECRET }}
cmd:
- 'bash'
- '-c'
- ${{ env.FORGEJO_SCRIPT }}
lint:
name: check and test
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.21'
- uses: actions/checkout@v4
- run: make vet
- run: make build
- uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: forgejo-runner
path: forgejo-runner
- name: check the forgejo server is responding
run: |
apt-get update -qq
apt-get install -y -qq jq curl
test $FORGEJO_ADMIN_USER = $(curl -sS http://$FORGEJO_ADMIN_USER:$FORGEJO_ADMIN_PASSWORD@$FORGEJO_HOST_PORT/api/v1/user | jq --raw-output .login)
- run: make FORGEJO_URL=http://$FORGEJO_HOST_PORT test
runner-exec-tests:
needs: [build-and-tests]
name: runner exec tests
if: github.repository_owner != 'forgejo-integration' && github.repository_owner != 'forgejo-experimental' && github.repository_owner != 'forgejo-release'
runs-on: self-hosted
steps:
- uses: actions/checkout@v4
- uses: https://code.forgejo.org/actions/download-artifact@v3
with:
name: forgejo-runner
- name: install docker
run: |
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"ipv6": true,
"experimental": true,
"ip6tables": true,
"fixed-cidr-v6": "fd05:d0ca:1::/64",
"default-address-pools": [
{
"base": "172.19.0.0/16",
"size": 24
},
{
"base": "fd05:d0ca:2::/104",
"size": 112
}
]
}
EOF
apt --quiet install --yes -qq docker.io
- name: forgejo-runner exec --enable-ipv6
run: |
set -x
chmod +x forgejo-runner
./forgejo-runner exec --enable-ipv6 --workflows .forgejo/testdata/ipv6.yml
if ./forgejo-runner exec --workflows .forgejo/testdata/ipv6.yml >& /tmp/out ; then
cat /tmp/out
echo "IPv6 not enabled, should fail"
exit 1
fi
go-version: 1.20
- uses: actions/checkout@v3
- name: vet checks
run: make vet
- name: build
run: make build
- name: test
run: make test

.gitattributes (1 changed line)

@ -1 +0,0 @@
* text=auto eol=lf


@ -0,0 +1,97 @@
name: release-nightly
on:
push:
branches: [ main ]
env:
GOPATH: /go_path
GOCACHE: /go_cache
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0 # all history for all branches and tags
- uses: actions/setup-go@v3
with:
go-version: '>=1.20.1'
- uses: https://gitea.com/actions/go-hashfiles@v0.0.1
id: hash-go
with:
patterns: |
go.mod
go.sum
- name: cache go
id: cache-go
uses: https://github.com/actions/cache@v3
with:
path: |
/go_path
/go_cache
key: go_path-${{ steps.hash-go.outputs.hash }}
- name: goreleaser
uses: https://github.com/goreleaser/goreleaser-action@v4
with:
distribution: goreleaser-pro
version: latest
args: release --nightly
env:
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
AWS_REGION: ${{ secrets.AWS_REGION }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
S3_REGION: ${{ secrets.AWS_REGION }}
S3_BUCKET: ${{ secrets.AWS_BUCKET }}
release-image:
runs-on: ubuntu-latest
container:
image: catthehacker/ubuntu:act-latest
env:
DOCKER_ORG: gitea
DOCKER_LATEST: nightly
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0 # all history for all branches and tags
- name: dockerfile lint check
uses: https://github.com/hadolint/hadolint-action@v3.1.0
with:
dockerfile: Dockerfile
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker BuildX
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Get Meta
id: meta
run: |
echo REPO_NAME=$(echo ${GITHUB_REPOSITORY} | awk -F"/" '{print $2}') >> $GITHUB_OUTPUT
echo REPO_VERSION=$(git describe --tags --always | sed 's/^v//') >> $GITHUB_OUTPUT
- name: Build and push
uses: docker/build-push-action@v4
env:
ACTIONS_RUNTIME_TOKEN: '' # See https://gitea.com/gitea/act_runner/issues/119
with:
context: .
file: ./Dockerfile
platforms: |
linux/amd64
linux/arm64
push: true
tags: |
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}


@ -0,0 +1,108 @@
name: release-tag
on:
push:
tags:
- '*'
env:
GOPATH: /go_path
GOCACHE: /go_cache
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0 # all history for all branches and tags
- uses: actions/setup-go@v3
with:
go-version: '>=1.20.1'
- uses: https://gitea.com/actions/go-hashfiles@v0.0.1
id: hash-go
with:
patterns: |
go.mod
go.sum
- name: cache go
id: cache-go
uses: https://github.com/actions/cache@v3
with:
path: |
/go_path
/go_cache
key: go_path-${{ steps.hash-go.outputs.hash }}
- name: Import GPG key
id: import_gpg
uses: https://github.com/crazy-max/ghaction-import-gpg@v5
with:
gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.PASSPHRASE }}
fingerprint: CC64B1DB67ABBEECAB24B6455FC346329753F4B0
- name: goreleaser
uses: https://github.com/goreleaser/goreleaser-action@v4
with:
distribution: goreleaser-pro
version: latest
args: release
env:
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
AWS_REGION: ${{ secrets.AWS_REGION }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
S3_REGION: ${{ secrets.AWS_REGION }}
S3_BUCKET: ${{ secrets.AWS_BUCKET }}
GORELEASER_FORCE_TOKEN: 'gitea'
GITEA_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }}
release-image:
runs-on: ubuntu-latest
container:
image: catthehacker/ubuntu:act-latest
env:
DOCKER_ORG: gitea
DOCKER_LATEST: latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0 # all history for all branches and tags
- name: dockerfile lint check
uses: https://github.com/hadolint/hadolint-action@v3.1.0
with:
dockerfile: Dockerfile
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker BuildX
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Get Meta
id: meta
run: |
echo REPO_NAME=$(echo ${GITHUB_REPOSITORY} | awk -F"/" '{print $2}') >> $GITHUB_OUTPUT
echo REPO_VERSION=$(git describe --tags --always | sed 's/^v//') >> $GITHUB_OUTPUT
- name: Build and push
uses: docker/build-push-action@v4
env:
ACTIONS_RUNTIME_TOKEN: '' # See https://gitea.com/gitea/act_runner/issues/119
with:
context: .
file: ./Dockerfile
platforms: |
linux/amd64
linux/arm64
push: true
tags: |
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ steps.meta.outputs.REPO_VERSION }}
${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}

.gitea/workflows/test.yml (42 changed lines, new file)

@ -0,0 +1,42 @@
name: checks
on:
- push
- pull_request
env:
GOPATH: /go_path
GOCACHE: /go_cache
jobs:
lint:
name: check and test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '>=1.20.1'
- uses: https://gitea.com/actions/go-hashfiles@v0.0.1
id: hash-go
with:
patterns: |
go.mod
go.sum
- name: cache go
id: cache-go
uses: https://github.com/actions/cache@v3
with:
path: |
/go_path
/go_cache
key: go_path-${{ steps.hash-go.outputs.hash }}
- name: vet checks
run: make vet
- name: build
run: make build
- name: test
run: make test
- name: dockerfile lint check
uses: https://github.com/hadolint/hadolint-action@v3.1.0
with:
dockerfile: Dockerfile

.gitignore (2 changed lines)

@ -10,5 +10,3 @@ coverage.txt
# MS VSCode
.vscode
__debug_bin
# gorelease binary folder
dist


@ -1,11 +1,14 @@
linters:
enable:
- gosimple
- deadcode
- typecheck
- govet
- errcheck
- staticcheck
- unused
- structcheck
- varcheck
- dupl
#- gocyclo # The cyclomatic complexity of a lot of functions is too high; we should refactor those another time.
- gofmt
@ -109,6 +112,7 @@ issues:
- gocritic
- linters:
- unused
- deadcode
text: "swagger"
- path: contrib/pr/checkout.go
linters:
@ -150,6 +154,9 @@ issues:
- path: cmd/dump.go
linters:
- dupl
- path: services/webhook/webhook.go
linters:
- structcheck
- text: "commentFormatting: put a space between `//` and comment text"
linters:
- gocritic

.goreleaser.checksum.sh (12 changed lines, new file)

@ -0,0 +1,12 @@
#!/bin/bash
set -e
if [ -z "$1" ]; then
echo "usage: $0 <path>"
exit 1
fi
SUM=$(shasum -a 256 "$1" | cut -d' ' -f1)
BASENAME=$(basename "$1")
echo -n "${SUM} ${BASENAME}" > "$1".sha256

.goreleaser.yaml (112 changed lines, new file)

@ -0,0 +1,112 @@
before:
hooks:
- go mod tidy
builds:
- env:
- CGO_ENABLED=0
goos:
- darwin
- linux
- windows
- freebsd
goarch:
- amd64
- arm
- arm64
goarm:
- "5"
- "6"
- "7"
ignore:
- goos: darwin
goarch: arm
- goos: darwin
goarch: ppc64le
- goos: darwin
goarch: s390x
- goos: windows
goarch: ppc64le
- goos: windows
goarch: s390x
- goos: windows
goarch: arm
goarm: "5"
- goos: windows
goarch: arm
goarm: "6"
- goos: windows
goarch: arm
goarm: "7"
- goos: windows
goarch: arm64
- goos: freebsd
goarch: ppc64le
- goos: freebsd
goarch: s390x
- goos: freebsd
goarch: arm
goarm: "5"
- goos: freebsd
goarch: arm
goarm: "6"
- goos: freebsd
goarch: arm
goarm: "7"
- goos: freebsd
goarch: arm64
flags:
- -trimpath
ldflags:
- -s -w -X gitea.com/gitea/act_runner/internal/pkg/ver.version={{ .Summary }}
binary: >-
{{ .ProjectName }}-
{{- .Version }}-
{{- .Os }}-
{{- if eq .Arch "amd64" }}amd64
{{- else if eq .Arch "amd64_v1" }}amd64
{{- else if eq .Arch "386" }}386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}-{{ .Arm }}{{ end }}
no_unique_dist_dir: true
hooks:
post:
- cmd: tar -cJf {{ .Path }}.xz {{ .Path }}
dir: ./dist/
env:
- XZ_OPT=-9
- cmd: sh .goreleaser.checksum.sh {{ .Path }}
- cmd: sh .goreleaser.checksum.sh {{ .Path }}.xz
blobs:
-
provider: s3
bucket: "{{ .Env.S3_BUCKET }}"
region: "{{ .Env.S3_REGION }}"
folder: "act_runner/{{.Version}}"
extra_files:
- glob: ./**.xz
- glob: ./**.sha256
archives:
- format: binary
name_template: "{{ .Binary }}"
allow_different_binary_count: true
checksum:
name_template: 'checksums.txt'
extra_files:
- glob: ./**.xz
snapshot:
name_template: "{{ .Branch }}-devel"
nightly:
name_template: "nightly"
gitea_urls:
api: https://gitea.com/api/v1
download: https://gitea.com
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj


@ -1,47 +1,15 @@
FROM --platform=$BUILDPLATFORM code.forgejo.org/oci/tonistiigi/xx AS xx
#Build stage
FROM golang:1.20-alpine3.17 AS build-env
FROM --platform=$BUILDPLATFORM code.forgejo.org/oci/golang:1.21-alpine3.19 as build-env
#
# Transparently cross compile for the target platform
#
COPY --from=xx / /
ARG TARGETPLATFORM
RUN apk --no-cache add clang lld
RUN xx-apk --no-cache add gcc musl-dev
RUN xx-go --wrap
# Do not remove `git` here, it is required for getting runner version when executing `make build`
RUN apk add --no-cache build-base git
RUN apk --no-cache add build-base git
COPY . /srv
WORKDIR /srv
RUN make build
RUN make clean && make build
FROM code.forgejo.org/oci/alpine:3.19
ARG RELEASE_VERSION
RUN apk add --no-cache git bash
FROM alpine:3.17
LABEL maintainer="contact@forgejo.org"
COPY --from=build-env /srv/forgejo-runner /bin/forgejo-runner
LABEL maintainer="contact@forgejo.org" \
org.opencontainers.image.authors="Forgejo" \
org.opencontainers.image.url="https://forgejo.org" \
org.opencontainers.image.documentation="https://forgejo.org/docs/latest/admin/actions/#forgejo-runner" \
org.opencontainers.image.source="https://code.forgejo.org/forgejo/runner" \
org.opencontainers.image.version="${RELEASE_VERSION}" \
org.opencontainers.image.vendor="Forgejo" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.title="Forgejo Runner" \
org.opencontainers.image.description="A runner for Forgejo Actions."
ENV HOME=/data
USER 1000:1000
WORKDIR /data
VOLUME ["/data"]
CMD ["/bin/forgejo-runner"]
ENTRYPOINT ["/bin/forgejo-runner"]


@ -1,4 +1,3 @@
Copyright (c) 2023 The Forgejo Authors
Copyright (c) 2022 The Gitea Authors
Permission is hereby granted, free of charge, to any person obtaining a copy


@ -7,7 +7,7 @@ GO ?= go
SHASUM ?= shasum -a 256
HAS_GO = $(shell hash $(GO) > /dev/null 2>&1 && echo "GO" || echo "NOGO" )
XGO_PACKAGE ?= src.techknowlogick.com/xgo@latest
XGO_VERSION := go-1.21.x
XGO_VERSION := go-1.18.x
GXZ_PAGAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.10
LINUX_ARCHS ?= linux/amd64,linux/arm64
@ -19,7 +19,6 @@ GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "genera
DOCKER_IMAGE ?= gitea/act_runner
DOCKER_TAG ?= nightly
DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)
DOCKER_ROOTLESS_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)-dind-rootless
EXTLDFLAGS = -extldflags "-static" $(null)
@ -62,11 +61,8 @@ else
endif
endif
GO_PACKAGES_TO_VET ?= $(filter-out gitea.com/gitea/act_runner/internal/pkg/client/mocks,$(shell $(GO) list ./...))
TAGS ?=
LDFLAGS ?= -X "gitea.com/gitea/act_runner/internal/pkg/ver.version=v$(RELASE_VERSION)"
LDFLAGS ?= -X "gitea.com/gitea/act_runner/internal/pkg/ver.version=$(RELASE_VERSION)"
all: build
@ -104,7 +100,8 @@ test: fmt-check
.PHONY: vet
vet:
@echo "Running go vet..."
@$(GO) vet $(GO_PACKAGES_TO_VET)
@$(GO) build code.gitea.io/gitea-vet
@$(GO) vet -vettool=gitea-vet ./...
install: $(GOFILES)
$(GO) install -v -tags '$(TAGS)' -ldflags '$(EXTLDFLAGS)-s -w $(LDFLAGS)'
@ -165,7 +162,6 @@ docker:
ARG_DISABLE_CONTENT_TRUST=--disable-content-trust=false; \
fi; \
docker build $${ARG_DISABLE_CONTENT_TRUST} -t $(DOCKER_REF) .
docker build $${ARG_DISABLE_CONTENT_TRUST} -t $(DOCKER_ROOTLESS_REF) -f Dockerfile.rootless .
clean:
$(GO) clean -x -i ./...

README.md (153 changed lines)

@ -1,94 +1,119 @@
# Forgejo Runner
# act runner
**WARNING:** this is [alpha release quality](https://en.wikipedia.org/wiki/Software_release_life_cycle#Alpha) code and should not be considered secure enough to deploy in production.
Act runner is a runner for Gitea based on [Gitea fork](https://gitea.com/gitea/act) of [act](https://github.com/nektos/act).
A daemon that connects to a Forgejo instance and runs jobs for continuous integration. The [installation and usage instructions](https://forgejo.org/docs/next/admin/actions/) are part of the Forgejo documentation.
## Installation
# Reporting bugs
### Prerequisites
When filing a bug in [the issue tracker](https://code.forgejo.org/forgejo/runner/issues), it is very helpful to propose a pull request [in the end-to-end tests](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions) repository that adds a reproducer. It will fail the CI and unambiguously demonstrate that the problem exists. In most cases it is enough to add a workflow ([see the echo example](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions/example-echo)). For more complicated cases it is also possible to add a runner config file as well as shell scripts to setup and teardown the test case ([see the service example](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions/example-service)).
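A reproducer workflow can be as small as the following sketch; the file name and the step contents are illustrative placeholders, modeled on the echo example referenced above:

```yaml
# .forgejo/workflows/reproducer.yml (hypothetical name)
on: [push]
jobs:
  reproduce:
    runs-on: docker
    steps:
      - uses: actions/checkout@v3
      - run: |
          echo "commands that trigger the reported problem go here"
```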
Docker Engine Community version is required for docker mode. To install Docker CE, follow the official [install instructions](https://docs.docker.com/engine/install/).
# Hacking
### Download pre-built binary
The Forgejo runner depends on [a fork of ACT](https://code.forgejo.org/forgejo/act) and is a dependency of the [setup-forgejo action](https://code.forgejo.org/actions/setup-forgejo). See [the full dependency graph](https://code.forgejo.org/actions/cascading-pr/#forgejo-dependencies) for a global view.
Visit https://dl.gitea.com/act_runner/ and download the right version for your platform.
## Local debug
### Build from source
The repositories are checked out in the same directory:
- **runner**: [Forgejo runner](https://code.forgejo.org/forgejo/runner)
- **act**: [ACT](https://code.forgejo.org/forgejo/act)
- **setup-forgejo**: [setup-forgejo](https://code.forgejo.org/actions/setup-forgejo)
### Install dependencies
The dependencies are installed manually or with:
```shell
setup-forgejo/forgejo-dependencies.sh
```bash
make build
```
### Build the Forgejo runner with the local ACT
### Build a docker image
The Forgejo runner is rebuilt with the ACT directory by changing the `runner/go.mod` file to:
```
replace github.com/nektos/act => ../act
```bash
make docker
```
Running:
## Quickstart
```
cd runner ; go mod tidy
### Register
```bash
./act_runner register
```
Building:
And you will be asked to input:
```shell
cd runner ; rm -f forgejo-runner ; make forgejo-runner
1. Gitea instance URL, like `http://192.168.8.8:3000/`. Use your Gitea instance's ROOT_URL as the instance argument; do not use `localhost` or `127.0.0.1` as the instance IP;
2. Runner token, you can get it from `http://192.168.8.8:3000/admin/runners`;
3. Runner name, you can just leave it blank;
4. Runner labels, you can just leave it blank.
The process looks like:
```text
INFO Registering runner, arch=amd64, os=darwin, version=0.1.5.
WARN Runner in user-mode.
INFO Enter the Gitea instance URL (for example, https://gitea.com/):
http://192.168.8.8:3000/
INFO Enter the runner token:
fe884e8027dc292970d4e0303fe82b14xxxxxxxx
INFO Enter the runner name (if set empty, use hostname: Test.local):
INFO Enter the runner labels, leave blank to use the default labels (comma-separated, for example, ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,linux_arm:host):
INFO Registering runner, name=Test.local, instance=http://192.168.8.8:3000/, labels=[ubuntu-latest:docker://node:16-bullseye ubuntu-22.04:docker://node:16-bullseye ubuntu-20.04:docker://node:16-bullseye ubuntu-18.04:docker://node:16-buster].
DEBU Successfully pinged the Gitea instance server
INFO Runner registered successfully.
```
### Launch Forgejo and the runner
You can also register with command line arguments.
A Forgejo instance is launched with:
```shell
cd setup-forgejo
./forgejo.sh setup
firefox $(cat forgejo-url)
```bash
./act_runner register --instance http://192.168.8.8:3000 --token <my_runner_token> --no-interactive
```
The user is `root` with password `admin1234`. The runner is registered with:
If the registration succeeds, the runner will start immediately. Next time, you can run the runner directly.
```
cd setup-forgejo
docker exec --user 1000 forgejo forgejo actions generate-runner-token > forgejo-runner-token
../runner/forgejo-runner register --no-interactive --instance "$(cat forgejo-url)" --name runner --token $(cat forgejo-runner-token) --labels docker:docker://node:20-bullseye,self-hosted:host://-self-hosted,lxc:lxc://debian:bullseye
### Run
```bash
./act_runner daemon
```
And launched with:
### Configuration
```shell
cd setup-forgejo ; ../runner/forgejo-runner --config runner-config.yml daemon
You can also configure the runner with a configuration file.
The configuration file is a YAML file, you can generate a sample configuration file with `./act_runner generate-config`.
```bash
./act_runner generate-config > config.yaml
```
Note that the `runner-config.yml` is required in that particular case
to configure the network in `bridge` mode; otherwise the runner will
create a network that cannot reach the Forgejo instance.
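As a minimal sketch, assuming the network setting sits under the `container` section as in the file produced by `forgejo-runner generate-config`, the relevant part of such a `runner-config.yml` would look like:

```yaml
# hypothetical excerpt of runner-config.yml; only the network setting is shown
container:
  # Use the default docker bridge instead of letting the runner create
  # its own network, so job containers can reach the Forgejo instance.
  network: bridge
```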
You can specify the configuration file path with `-c`/`--config` argument.
### Try a sample workflow
From the Forgejo web interface, create a repository and add the
following to `.forgejo/workflows/try.yaml`. It will launch the job and
the result can be observed from the `actions` tab.
```yaml
on: [push]
jobs:
ls:
runs-on: docker
steps:
- uses: actions/checkout@v3
- run: |
ls ${{ github.workspace }}
```bash
./act_runner -c config.yaml register # register with config file
./act_runner -c config.yaml daemon # run with config file
```
### Run a docker container
```sh
docker run -e GITEA_INSTANCE_URL=http://192.168.8.18:3000 -e GITEA_RUNNER_REGISTRATION_TOKEN=<runner_token> -v /var/run/docker.sock:/var/run/docker.sock -v $PWD/data:/data --name my_runner gitea/act_runner:nightly
```
The `/data` directory inside the docker container contains the runner API keys after registration.
It must be persisted, otherwise the runner would try to register again, using the same, now defunct registration token.
### Running in docker-compose
```yml
...
gitea:
image: gitea/gitea
...
runner:
image: gitea/act_runner
restart: always
depends_on:
- gitea
volumes:
- ./data/act_runner:/data
- /var/run/docker.sock:/var/run/docker.sock
environment:
- GITEA_INSTANCE_URL=<instance url>
- GITEA_RUNNER_REGISTRATION_TOKEN=<registration token>
```


@ -1,102 +0,0 @@
# Release Notes
## 3.5.2
* Fix [crash in some cases when the YAML structure is not as expected](https://code.forgejo.org/forgejo/runner/issues/267).
## 3.5.1
* Fix [CVE-2024-24557](https://nvd.nist.gov/vuln/detail/CVE-2024-24557)
* [Add report_interval option to config](https://code.forgejo.org/forgejo/runner/pulls/220) to allow setting the interval of status and log reports
## 3.5.0
* [Allow graceful shutdowns](https://code.forgejo.org/forgejo/runner/pulls/202): when receiving a signal (INT or TERM) wait for running jobs to complete (up to shutdown_timeout).
* [Fix label declaration](https://code.forgejo.org/forgejo/runner/pulls/176): Runner in daemon mode now takes labels found in config.yml into account when declaration was successful.
* [Fix the docker compose example](https://code.forgejo.org/forgejo/runner/pulls/175) to workaround the race on labels.
* [Fix the kubernetes dind example](https://code.forgejo.org/forgejo/runner/pulls/169).
* [Rewrite ::group:: and ::endgroup:: commands like github](https://code.forgejo.org/forgejo/runner/pulls/183).
* [Added opencontainers labels to the image](https://code.forgejo.org/forgejo/runner/pulls/195)
* [Upgrade the default container to node:20](https://code.forgejo.org/forgejo/runner/pulls/203)
## 3.4.1
* Fixes a regression introduced in 3.4.0 by which a job with no image explicitly set would
[be bound to the host](https://code.forgejo.org/forgejo/runner/issues/165)
network instead of a custom network (empty string in the configuration file).
## 3.4.0
Although this version is able to run [actions/upload-artifact@v4](https://code.forgejo.org/actions/upload-artifact/src/tag/v4) and [actions/download-artifact@v4](https://code.forgejo.org/actions/download-artifact/src/tag/v4), these actions will fail because they are not running against GitHub.com. A fork of those two actions with this check disabled is made available at:
* https://code.forgejo.org/forgejo/upload-artifact/src/tag/v4
* https://code.forgejo.org/forgejo/download-artifact/src/tag/v4
and they can be used as shown in [an example from the end-to-end test suite](https://code.forgejo.org/forgejo/end-to-end/src/branch/main/actions/example-artifacts-v4/.forgejo/workflows/test.yml).
* When running against codeberg.org, the default poll frequency is 30s instead of 2s.
* Fix compatibility issue with actions/{upload,download}-artifact@v4.
* Upgrade ACT v1.20.0 which brings:
* `[container].options` from the config file is exposed in containers created by the workflows
* the expressions in the value of `jobs.<job-id>.runs-on` are evaluated
* fix a bug causing the evaluated expression of `jobs.<job-id>.runs-on` to fail if it was an array
* mount `act-toolcache:/opt/hostedtoolcache` instead of `act-toolcache:/toolcache`
* a few improvements to the readability of the error messages displayed in the logs
* `amd64` can be used instead of `x86_64` and `arm64` instead of `aarch64` when specifying the architecture
* fixed YAML parsing bugs preventing dispatch workflows to be parsed correctly
* add support for `runs-on.labels` which is equivalent to `runs-on` followed by a list of labels
* the expressions in the service `ports` and `volumes` values are evaluated
* network aliases are only supported when the network is user specified, not when it is provided by the runner
* If `[runner].insecure` is true in the configuration, insecure cloning of actions is allowed
## 3.3.0
* Support IPv6 with addresses from a private range and NAT for
  * docker:// with --enable-ipv6 and [container].enable_ipv6
  * lxc:// always
## 3.2.0
* Support LXC container capabilities via `lxc:lxc://debian:bookworm:k8s` or `lxc:lxc://debian:bookworm:docker lxc k8s`
* Update ACT v1.16.0 to resolve a [race condition when bootstrapping LXC templates](https://code.forgejo.org/forgejo/act/pulls/23)
## 3.1.0
The `self-hosted` label that was hardwired to be a LXC container
running `debian:bullseye` was reworked and documented ([user guide](https://forgejo.org/docs/next/user/actions/#jobsjob_idruns-on) and [admin guide](https://forgejo.org/docs/next/admin/actions/#labels-and-runs-on)).
There now are two different schemes: `lxc://` for LXC containers and
`host://` for running directly on the host.
* Support the `host://` scheme for running directly on the host.
* Support the `lxc://` scheme in labels
* Update [code.forgejo.org/forgejo/act v1.14.0](https://code.forgejo.org/forgejo/act/pulls/19) to implement both self-hosted and LXC schemes
## 3.0.3
* Update [code.forgejo.org/forgejo/act v1.13.0](https://code.forgejo.org/forgejo/runner/pulls/106) to keep up with github.com/nektos/act
## 3.0.2
* Update [code.forgejo.org/forgejo/act v1.12.0](https://code.forgejo.org/forgejo/runner/pulls/106) to upgrade the node installed in the LXC container to node20
## 3.0.1
* Update [code.forgejo.org/forgejo/act v1.11.0](https://code.forgejo.org/forgejo/runner/pulls/86) to resolve a bug preventing actions based on node20 from running, such as [checkout@v4](https://code.forgejo.org/actions/checkout/src/tag/v4).
## 3.0.0
* Publish a rootless OCI image
* Refactor the release process
## 2.5.0
* Update [code.forgejo.org/forgejo/act v1.10.0](https://code.forgejo.org/forgejo/runner/pulls/71)
## 2.4.0
* Update [code.forgejo.org/forgejo/act v1.9.0](https://code.forgejo.org/forgejo/runner/pulls/64)
## 2.3.0
* Add support for [offline registration](https://forgejo.org/docs/next/admin/actions/#offline-registration).


@ -1,18 +0,0 @@
[Unit]
Description=Forgejo Runner
Documentation=https://forgejo.org/docs/latest/admin/actions/
After=docker.service
[Service]
ExecStart=forgejo-runner daemon
ExecReload=/bin/kill -s HUP $MAINPID
# This user and working directory must already exist
User=runner
WorkingDirectory=/home/runner
Restart=on-failure
TimeoutSec=0
RestartSec=10
[Install]
WantedBy=multi-user.target


@ -1,10 +0,0 @@
This directory contains a collection of usage and deployment examples.
Workflow examples can be found [in the documentation](https://forgejo.org/docs/next/user/actions/)
and in the [sources of the setup-forgejo](https://code.forgejo.org/actions/setup-forgejo/src/branch/main/testdata) action.
| Section | Description |
|-----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [`docker`](docker) | using the host docker server by mounting the socket |
| [`docker-compose`](docker-compose) | all in one docker-compose with the Forgejo server, the runner and docker in docker |
| [`kubernetes`](kubernetes) | a sample deployment for the Forgejo runner |


@ -1,113 +0,0 @@
## Docker compose with docker-in-docker
The `compose-forgejo-and-runner.yml` compose file runs a Forgejo
instance and registers a `Forgejo runner`. A docker server is also
launched within a container (using
[dind](https://hub.docker.com/_/docker/tags?name=dind)) and will be
used by the `Forgejo runner` to execute the workflows.
### Quick start
```sh
rm -fr /srv/runner-data /srv/forgejo-data
secret=$(openssl rand -hex 20)
sed -i -e "s/{SHARED_SECRET}/$secret/" compose-forgejo-and-runner.yml
docker compose -f compose-forgejo-and-runner.yml up -d
```
Visit http://0.0.0.0:8080/admin/actions/runners with login `root` and password `{ROOT_PASSWORD}` and see the runner is registered with the label `docker`.
> NOTE: the `Your ROOT_URL in app.ini is "http://localhost:3000/", it's unlikely matching the site you are visiting.` message is a warning that can be ignored in the context of this example.
```sh
docker compose -f compose-forgejo-and-runner.yml -f compose-demo-workflow.yml up demo-workflow
```
Visit http://0.0.0.0:8080/root/test/actions/runs/1 and see that the job ran.
### Running
Create a shared secret with:
```sh
openssl rand -hex 20
```
Replace all occurrences of {SHARED_SECRET} in
[compose-forgejo-and-runner.yml](compose-forgejo-and-runner.yml).
> **NOTE:** a token obtained from the Forgejo web interface cannot be used as a shared secret.
Replace {ROOT_PASSWORD} with a secure password in
[compose-forgejo-and-runner.yml](compose-forgejo-and-runner.yml).
```sh
docker compose -f compose-forgejo-and-runner.yml up
Creating docker-compose_docker-in-docker_1 ... done
Creating docker-compose_forgejo_1 ... done
Creating docker-compose_runner-register_1 ... done
...
docker-in-docker_1 | time="2023-08-24T10:22:15.023338461Z" level=warning msg="WARNING: API is accessible on http://0.0.0.0:2376
...
forgejo_1 | 2023/08/24 10:22:14 ...s/graceful/server.go:75:func1() [D] Starting server on tcp:0.0.0.0:3000 (PID: 19)
...
runner-daemon_1 | time="2023-08-24T10:22:16Z" level=info msg="Starting runner daemon"
```
### Manual testing
To login the Forgejo instance:
* URL: http://0.0.0.0:8080
* user: `root`
* password: `{ROOT_PASSWORD}`
`Forgejo Actions` is enabled by default when creating a repository.
## Tests workflow
The `compose-demo-workflow.yml` compose file runs two demo workflows:
* one to verify the `Forgejo runner` can pick up a task from the Forgejo instance
and run it to completion.
* one to verify docker can be run inside the `Forgejo runner` container.
A new repository is created in root/test with the following workflows:
#### `.forgejo/workflows/demo.yml`:
```yaml
on: [push]
jobs:
test:
runs-on: docker
steps:
- run: echo All Good
```
#### `.forgejo/workflows/demo_docker.yml`
```yaml
on: [push]
jobs:
test_docker:
runs-on: ubuntu-22.04
steps:
- run: docker info
```
A wait loop expects the status of the check associated with the
commit in Forgejo to show "success" to assert the workflow was run.
### Running
```sh
$ docker-compose -f compose-forgejo-and-runner.yml -f compose-demo-workflow.yml up demo-workflow
...
demo-workflow_1 | To http://forgejo:3000/root/test
demo-workflow_1 | + 5ce134e...261cc79 main -> main (forced update)
demo-workflow_1 | branch 'main' set up to track 'http://root:admin1234@forgejo:3000/root/test/main'.
...
demo-workflow_1 | running
...
```


@ -1,35 +0,0 @@
# Copyright 2024 The Forgejo Authors.
# SPDX-License-Identifier: MIT
services:
demo-workflow:
image: code.forgejo.org/oci/alpine:3.19
links:
- forgejo
command: >-
sh -ec '
apk add --quiet git curl jq ;
mkdir -p /srv/demo ;
cd /srv/demo ;
git init --initial-branch=main ;
mkdir -p .forgejo/workflows ;
echo "{ on: [push], jobs: { test: { runs-on: docker, steps: [ {uses: actions/checkout@v4}, { run: echo All Good } ] } } }" > .forgejo/workflows/demo.yml ;
echo "{ on: [push], jobs: { test_docker: { runs-on: ubuntu-22.04, steps: [ { run: docker info } ] } } }" > .forgejo/workflows/demo_docker.yml ;
git add . ;
git config user.email root@example.com ;
git config user.name username ;
git commit -m demo ;
while : ; do
git push --set-upstream --force http://root:{ROOT_PASSWORD}@forgejo:3000/root/test main && break ;
sleep 5 ;
done ;
sha=`git rev-parse HEAD` ;
for delay in 1 1 1 1 2 5 5 10 10 10 15 30 30 30 30 30 30 30 ; do
curl -sS -f http://forgejo:3000/api/v1/repos/root/test/commits/$$sha/status | jq --raw-output .state | tee status ;
if grep success status ; then echo DEMO WORKFLOW SUCCESS && break ; fi ;
if grep failure status ; then echo DEMO WORKFLOW FAILURE && break ; fi ;
sleep $$delay ;
done ;
grep success status || echo DEMO WORKFLOW FAILURE
'


@ -1,93 +0,0 @@
# Copyright 2024 The Forgejo Authors.
# SPDX-License-Identifier: MIT
#
# Create a secret with:
#
# openssl rand -hex 20
#
# Replace all occurrences of {SHARED_SECRET} below with the output.
#
# NOTE: a token obtained from the Forgejo web interface cannot be used
# as a shared secret.
#
# Replace {ROOT_PASSWORD} with a secure password
#
volumes:
docker_certs:
services:
docker-in-docker:
image: code.forgejo.org/oci/docker:dind
hostname: docker # Must set hostname as TLS certificates are only valid for docker or localhost
privileged: true
environment:
DOCKER_TLS_CERTDIR: /certs
DOCKER_HOST: docker-in-docker
volumes:
- docker_certs:/certs
forgejo:
image: codeberg.org/forgejo/forgejo:1.21
command: >-
bash -c '
/bin/s6-svscan /etc/s6 &
sleep 10 ;
su -c "forgejo forgejo-cli actions register --secret {SHARED_SECRET}" git ;
su -c "forgejo admin user create --admin --username root --password {ROOT_PASSWORD} --email root@example.com" git ;
sleep infinity
'
environment:
FORGEJO__security__INSTALL_LOCK: "true"
FORGEJO__log__LEVEL: "debug"
FORGEJO__repository__ENABLE_PUSH_CREATE_USER: "true"
FORGEJO__repository__DEFAULT_PUSH_CREATE_PRIVATE: "false"
FORGEJO__repository__DEFAULT_REPO_UNITS: "repo.code,repo.actions"
volumes:
- /srv/forgejo-data:/data
ports:
- 8080:3000
runner-register:
image: code.forgejo.org/forgejo/runner:3.4.1
links:
- docker-in-docker
- forgejo
environment:
DOCKER_HOST: tcp://docker-in-docker:2376
volumes:
- /srv/runner-data:/data
user: 0:0
command: >-
bash -ec '
while : ; do
forgejo-runner create-runner-file --connect --instance http://forgejo:3000 --name runner --secret {SHARED_SECRET} && break ;
sleep 1 ;
done ;
sed -i -e "s|\"labels\": null|\"labels\": [\"docker:docker://code.forgejo.org/oci/node:20-bookworm\", \"ubuntu-22.04:docker://catthehacker/ubuntu:act-22.04\"]|" .runner ;
forgejo-runner generate-config > config.yml ;
sed -i -e "s|network: .*|network: host|" config.yml ;
sed -i -e "s|^ envs:$$| envs:\n DOCKER_HOST: tcp://docker:2376\n DOCKER_TLS_VERIFY: 1\n DOCKER_CERT_PATH: /certs/client|" config.yml ;
sed -i -e "s|^ options:| options: -v /certs/client:/certs/client|" config.yml ;
sed -i -e "s| valid_volumes: \[\]$$| valid_volumes:\n - /certs/client|" config.yml ;
chown -R 1000:1000 /data
'
runner-daemon:
image: code.forgejo.org/forgejo/runner:3.4.1
links:
- docker-in-docker
- forgejo
environment:
DOCKER_HOST: tcp://docker:2376
DOCKER_CERT_PATH: /certs/client
DOCKER_TLS_VERIFY: "1"
volumes:
- /srv/runner-data:/data
- docker_certs:/certs
command: >-
bash -c '
while : ; do test -w .runner && forgejo-runner --config config.yml daemon ; sleep 1 ; done
'


@ -1,12 +0,0 @@
The following assumes:
* a docker server runs on the host
* the docker group of the host is GID 133
* a `.runner` file exists in /tmp/data
* a `runner-config.yml` file exists in /tmp/data
```sh
docker run -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/data:/data --user 1000:133 --rm code.forgejo.org/forgejo/runner:3.0.0 forgejo-runner --config runner-config.yml daemon
```
The workflows will run using the host docker server.


@ -1,7 +0,0 @@
## Kubernetes Docker in Docker Deployment
Registers Kubernetes pod runners using [offline registration](https://forgejo.org/docs/v1.21/admin/actions/#offline-registration), allowing the scaling of runners as needed.
NOTE: Docker in Docker (dind) requires elevated privileges on Kubernetes. The current way to achieve this is to set the pod `SecurityContext` to `privileged`. Keep in mind that this is a potential security issue: a malicious application could break out of the container context.
[`dind-docker.yaml`](dind-docker.yaml) creates a deployment and secret for Kubernetes to act as a runner. The Docker credentials are re-generated each time the pod connects and do not need to be persisted.


@ -1,87 +0,0 @@
# Secret data.
# You will need to retrieve this from the web UI, and your Forgejo instance must be running v1.21+
# Alternatively, create this with
# kubectl create secret generic runner-secret --from-literal=token=your_offline_token_here
apiVersion: v1
stringData:
token: your_offline_secret_here
kind: Secret
metadata:
name: runner-secret
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: forgejo-runner
name: forgejo-runner
spec:
# Two replicas means that if one is busy, the other can pick up jobs.
replicas: 2
selector:
matchLabels:
app: forgejo-runner
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: forgejo-runner
spec:
restartPolicy: Always
volumes:
- name: docker-certs
emptyDir: {}
- name: runner-data
emptyDir: {}
# Initialise our configuration file using offline registration
# https://forgejo.org/docs/v1.21/admin/actions/#offline-registration
initContainers:
- name: runner-register
image: code.forgejo.org/forgejo/runner:3.2.0
command: ["forgejo-runner", "register", "--no-interactive", "--token", $(RUNNER_SECRET), "--name", $(RUNNER_NAME), "--instance", $(FORGEJO_INSTANCE_URL)]
env:
- name: RUNNER_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: RUNNER_SECRET
valueFrom:
secretKeyRef:
name: runner-secret
key: token
- name: FORGEJO_INSTANCE_URL
value: http://forgejo-http.forgejo.svc.cluster.local:3000
resources:
limits:
cpu: "0.50"
memory: "64Mi"
volumeMounts:
- name: runner-data
mountPath: /data
containers:
- name: runner
image: code.forgejo.org/forgejo/runner:3.0.0
command: ["sh", "-c", "while ! nc -z localhost 2376 </dev/null; do echo 'waiting for docker daemon...'; sleep 5; done; forgejo-runner daemon"]
env:
- name: DOCKER_HOST
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
value: "1"
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: runner-data
mountPath: /data
- name: daemon
image: docker:23.0.6-dind
env:
- name: DOCKER_TLS_CERTDIR
value: /certs
securityContext:
privileged: true
volumeMounts:
- name: docker-certs
mountPath: /certs

156
go.mod
View file

@ -1,105 +1,113 @@
module gitea.com/gitea/act_runner
go 1.21.13
toolchain go1.23.1
go 1.20
require (
code.gitea.io/actions-proto-go v0.4.0
code.gitea.io/gitea-vet v0.2.3
connectrpc.com/connect v1.17.0
github.com/avast/retry-go/v4 v4.6.0
github.com/docker/docker v25.0.6+incompatible
github.com/google/uuid v1.6.0
code.gitea.io/actions-proto-go v0.2.1
code.gitea.io/gitea-vet v0.2.3-0.20230113022436-2b1561217fa5
github.com/avast/retry-go/v4 v4.3.1
github.com/bufbuild/connect-go v1.3.1
github.com/docker/docker v23.0.1+incompatible
github.com/go-chi/chi/v5 v5.0.8
github.com/go-chi/render v1.0.2
github.com/joho/godotenv v1.5.1
github.com/mattn/go-isatty v0.0.20
github.com/nektos/act v0.2.49
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/stretchr/testify v1.9.0
golang.org/x/term v0.24.0
golang.org/x/time v0.6.0
google.golang.org/protobuf v1.34.2
github.com/mattn/go-isatty v0.0.18
github.com/mattn/go-sqlite3 v1.14.16
github.com/nektos/act v0.0.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
github.com/stretchr/testify v1.8.1
golang.org/x/term v0.7.0
golang.org/x/time v0.1.0
google.golang.org/protobuf v1.28.1
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
gotest.tools/v3 v3.4.0
modernc.org/sqlite v1.22.0
xorm.io/builder v0.3.11-0.20220531020008-1bd24a7dc978
xorm.io/xorm v1.3.2
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/containerd/containerd v1.7.13 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/creack/pty v1.1.21 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20220404123522-616f957b79ad // indirect
github.com/acomagu/bufpipe v1.0.3 // indirect
github.com/ajg/form v1.5.1 // indirect
github.com/containerd/containerd v1.6.18 // indirect
github.com/creack/pty v1.1.18 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/cli v25.0.3+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/cli v23.0.1+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-git/go-git/v5 v5.11.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emirpasic/gods v1.12.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.4.1 // indirect
github.com/go-git/go-git/v5 v5.4.2 // indirect
github.com/goccy/go-json v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/compress v1.15.12 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/buildkit v0.13.2 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/moby/buildkit v0.11.4 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/onsi/ginkgo v1.12.1 // indirect
github.com/onsi/gomega v1.10.3 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rhysd/actionlint v1.6.27 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sergi/go-diff v1.3.1 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rhysd/actionlint v1.6.23 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/robfig/cron v1.2.0 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/syndtr/goleveldb v1.0.0 // indirect
github.com/xanzy/ssh-agent v0.3.1 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.etcd.io/bbolt v1.3.9 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
go.opentelemetry.io/otel v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.25.0 // indirect
golang.org/x/tools v0.14.0 // indirect
golang.org/x/crypto v0.2.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/tools v0.8.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
lukechampine.com/uint128 v1.3.0 // indirect
modernc.org/cc/v3 v3.40.0 // indirect
modernc.org/ccgo/v3 v3.16.13 // indirect
modernc.org/libc v1.22.5 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.5.0 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/strutil v1.1.3 // indirect
modernc.org/token v1.1.0 // indirect
)
replace github.com/nektos/act => code.forgejo.org/forgejo/act v1.21.3
replace github.com/nektos/act => code.forgejo.org/forgejo/act v1.5.1

1022
go.sum

File diff suppressed because it is too large

View file

@ -0,0 +1,11 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
//go:build cgo
// +build cgo
package artifactcache
import _ "github.com/mattn/go-sqlite3"
var sqliteDriverName = "sqlite3"

View file

@ -0,0 +1,11 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
//go:build !cgo
// +build !cgo
package artifactcache
import _ "modernc.org/sqlite"
var sqliteDriverName = "sqlite"

View file

@ -0,0 +1,12 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
// Package artifactcache provides a cache handler for the runner.
//
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
//
// TODO: Authorization
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
package artifactcache

View file

@ -0,0 +1,415 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"context"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/render"
log "github.com/sirupsen/logrus"
"xorm.io/builder"
"xorm.io/xorm"
)
const (
urlBase = "/_apis/artifactcache"
)
var logger = log.StandardLogger().WithField("module", "cache_request")
type Handler struct {
engine engine
storage *Storage
router *chi.Mux
listener net.Listener
gc atomic.Bool
gcAt time.Time
outboundIP string
}
func StartHandler(dir, outboundIP string, port uint16) (*Handler, error) {
h := &Handler{}
if dir == "" {
if home, err := os.UserHomeDir(); err != nil {
return nil, err
} else {
dir = filepath.Join(home, ".cache", "actcache")
}
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return nil, err
}
e, err := xorm.NewEngine(sqliteDriverName, filepath.Join(dir, "sqlite.db"))
if err != nil {
return nil, err
}
if err := e.Sync(&Cache{}); err != nil {
return nil, err
}
h.engine = engine{e: e}
storage, err := NewStorage(filepath.Join(dir, "cache"))
if err != nil {
return nil, err
}
h.storage = storage
if outboundIP != "" {
h.outboundIP = outboundIP
} else if ip, err := getOutboundIP(); err != nil {
return nil, err
} else {
h.outboundIP = ip.String()
}
router := chi.NewRouter()
router.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: logger}))
router.Use(func(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
handler.ServeHTTP(w, r)
go h.gcCache()
})
})
router.Use(middleware.Logger)
router.Route(urlBase, func(r chi.Router) {
r.Get("/cache", h.find)
r.Route("/caches", func(r chi.Router) {
r.Post("/", h.reserve)
r.Route("/{id}", func(r chi.Router) {
r.Patch("/", h.upload)
r.Post("/", h.commit)
})
})
r.Get("/artifacts/{id}", h.get)
r.Post("/clean", h.clean)
})
h.router = router
h.gcCache()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
if err != nil {
return nil, err
}
go func() {
if err := http.Serve(listener, h.router); err != nil {
logger.Errorf("http serve: %v", err)
}
}()
h.listener = listener
return h, nil
}
func (h *Handler) ExternalURL() string {
// TODO: make the external url configurable if necessary
return fmt.Sprintf("http://%s:%d",
h.outboundIP,
h.listener.Addr().(*net.TCPAddr).Port)
}
// GET /_apis/artifactcache/cache
func (h *Handler) find(w http.ResponseWriter, r *http.Request) {
keys := strings.Split(r.URL.Query().Get("keys"), ",")
version := r.URL.Query().Get("version")
cache, err := h.findCache(r.Context(), keys, version)
if err != nil {
responseJson(w, r, 500, err)
return
}
if cache == nil {
responseJson(w, r, 204)
return
}
if ok, err := h.storage.Exist(cache.ID); err != nil {
responseJson(w, r, 500, err)
return
} else if !ok {
_ = h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
})
responseJson(w, r, 204)
return
}
responseJson(w, r, 200, map[string]any{
"result": "hit",
"archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
"cacheKey": cache.Key,
})
}
// POST /_apis/artifactcache/caches
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request) {
cache := &Cache{}
if err := render.Bind(r, cache); err != nil {
responseJson(w, r, 400, err)
return
}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Where(builder.Eq{"key": cache.Key, "version": cache.Version}).Get(&Cache{})
}); err != nil {
responseJson(w, r, 500, err)
return
} else if ok {
responseJson(w, r, 400, fmt.Errorf("already exist"))
return
}
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Insert(cache)
return err
}); err != nil {
responseJson(w, r, 500, err)
return
}
responseJson(w, r, 200, map[string]any{
"cacheId": cache.ID,
})
return
}
// PATCH /_apis/artifactcache/caches/:id
func (h *Handler) upload(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
if err != nil {
responseJson(w, r, 400, err)
return
}
cache := &Cache{
ID: id,
}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Get(cache)
}); err != nil {
responseJson(w, r, 500, err)
return
} else if !ok {
responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
if cache.Complete {
responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
start, _, err := parseContentRange(r.Header.Get("Content-Range"))
if err != nil {
responseJson(w, r, 400, err)
return
}
if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
responseJson(w, r, 500, err)
}
h.useCache(r.Context(), id)
responseJson(w, r, 200)
}
// POST /_apis/artifactcache/caches/:id
func (h *Handler) commit(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
if err != nil {
responseJson(w, r, 400, err)
return
}
cache := &Cache{
ID: id,
}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Get(cache)
}); err != nil {
responseJson(w, r, 500, err)
return
} else if !ok {
responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
if cache.Complete {
responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
if err := h.storage.Commit(cache.ID, cache.Size); err != nil {
responseJson(w, r, 500, err)
return
}
cache.Complete = true
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.ID(cache.ID).Cols("complete").Update(cache)
return err
}); err != nil {
responseJson(w, r, 500, err)
return
}
responseJson(w, r, 200)
}
// GET /_apis/artifactcache/artifacts/:id
func (h *Handler) get(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
if err != nil {
responseJson(w, r, 400, err)
return
}
h.useCache(r.Context(), id)
h.storage.Serve(w, r, id)
}
// POST /_apis/artifactcache/clean
func (h *Handler) clean(w http.ResponseWriter, r *http.Request) {
// TODO: don't support force deleting cache entries
// see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
responseJson(w, r, 200)
}
// if not found, return (nil, nil) instead of an error.
func (h *Handler) findCache(ctx context.Context, keys []string, version string) (*Cache, error) {
if len(keys) == 0 {
return nil, nil
}
key := keys[0] // the first key is for exact match.
cache := &Cache{}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Where(builder.Eq{"key": key, "version": version, "complete": true}).Get(cache)
}); err != nil {
return nil, err
} else if ok {
return cache, nil
}
for _, prefix := range keys[1:] {
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Where(builder.And(
builder.Like{"key", prefix + "%"},
builder.Eq{"version": version, "complete": true},
)).OrderBy("id DESC").Get(cache)
}); err != nil {
return nil, err
} else if ok {
return cache, nil
}
}
return nil, nil
}
func (h *Handler) useCache(ctx context.Context, id int64) {
// keep quiet
_ = h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Context(ctx).Cols("used_at").Update(&Cache{
ID: id,
UsedAt: time.Now().Unix(),
})
return err
})
}
func (h *Handler) gcCache() {
if h.gc.Load() {
return
}
if !h.gc.CompareAndSwap(false, true) {
return
}
defer h.gc.Store(false)
if time.Since(h.gcAt) < time.Hour {
logger.Infof("skip gc: %v", h.gcAt.String())
return
}
h.gcAt = time.Now()
logger.Infof("gc: %v", h.gcAt.String())
const (
keepUsed = 30 * 24 * time.Hour
keepUnused = 7 * 24 * time.Hour
keepTemp = 5 * time.Minute
)
var caches []*Cache
if err := h.engine.Exec(func(sess *xorm.Session) error {
return sess.Where(builder.And(builder.Lt{"used_at": time.Now().Add(-keepTemp).Unix()}, builder.Eq{"complete": false})).
Find(&caches)
}); err != nil {
logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
}); err != nil {
logger.Warnf("delete cache: %v", err)
continue
}
logger.Infof("deleted cache: %+v", cache)
}
}
caches = caches[:0]
if err := h.engine.Exec(func(sess *xorm.Session) error {
return sess.Where(builder.Lt{"used_at": time.Now().Add(-keepUnused).Unix()}).
Find(&caches)
}); err != nil {
logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
}); err != nil {
logger.Warnf("delete cache: %v", err)
continue
}
logger.Infof("deleted cache: %+v", cache)
}
}
caches = caches[:0]
if err := h.engine.Exec(func(sess *xorm.Session) error {
return sess.Where(builder.Lt{"created_at": time.Now().Add(-keepUsed).Unix()}).
Find(&caches)
}); err != nil {
logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
}); err != nil {
logger.Warnf("delete cache: %v", err)
continue
}
logger.Infof("deleted cache: %+v", cache)
}
}
}
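The routes registered in `StartHandler` can be exercised by hand, which helps when debugging a job that fails to restore its cache. A minimal sketch, assuming the handler logged `http://127.0.0.1:41234` as its external URL (the port is random when `port` is 0) and that this is the first reservation, so the cache id is 1:
```sh
CACHE=http://127.0.0.1:41234/_apis/artifactcache

# reserve an entry; the response contains {"cacheId": N}
curl -s -X POST "$CACHE/caches" \
  -H 'Content-Type: application/json' \
  -d '{"key":"demo-key","version":"v1","cacheSize":5}'

# upload 5 bytes for id 1, then mark the entry complete
printf 'hello' | curl -s -X PATCH "$CACHE/caches/1" \
  -H 'Content-Range: bytes 0-4/*' --data-binary @-
curl -s -X POST "$CACHE/caches/1"

# look the key up again; a hit returns an archiveLocation to download from
curl -s "$CACHE/cache?keys=demo-key&version=v1"
```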

View file

@ -0,0 +1,30 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"fmt"
"net/http"
)
type Cache struct {
ID int64 `xorm:"id pk autoincr" json:"-"`
Key string `xorm:"TEXT index unique(key_version)" json:"key"`
Version string `xorm:"TEXT unique(key_version)" json:"version"`
Size int64 `json:"cacheSize"`
Complete bool `xorm:"index(complete_used_at)" json:"-"`
UsedAt int64 `xorm:"index(complete_used_at) updated" json:"-"`
CreatedAt int64 `xorm:"index created" json:"-"`
}
// Bind implements render.Binder
func (c *Cache) Bind(_ *http.Request) error {
if c.Key == "" {
return fmt.Errorf("missing key")
}
if c.Version == "" {
return fmt.Errorf("missing version")
}
return nil
}

View file

@ -0,0 +1,129 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
)
type Storage struct {
rootDir string
}
func NewStorage(rootDir string) (*Storage, error) {
if err := os.MkdirAll(rootDir, 0o755); err != nil {
return nil, err
}
return &Storage{
rootDir: rootDir,
}, nil
}
func (s *Storage) Exist(id int64) (bool, error) {
name := s.filename(id)
if _, err := os.Stat(name); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
func (s *Storage) Write(id int64, offset int64, reader io.Reader) error {
name := s.tempName(id, offset)
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return err
}
file, err := os.Create(name)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, reader)
return err
}
func (s *Storage) Commit(id int64, size int64) error {
defer func() {
_ = os.RemoveAll(s.tempDir(id))
}()
name := s.filename(id)
tempNames, err := s.tempNames(id)
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return err
}
file, err := os.Create(name)
if err != nil {
return err
}
defer file.Close()
var written int64
for _, v := range tempNames {
f, err := os.Open(v)
if err != nil {
return err
}
n, err := io.Copy(file, f)
_ = f.Close()
if err != nil {
return err
}
written += n
}
if written != size {
_ = file.Close()
_ = os.Remove(name)
return fmt.Errorf("broken file: %v != %v", written, size)
}
return nil
}
func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id int64) {
name := s.filename(id)
http.ServeFile(w, r, name)
}
func (s *Storage) Remove(id int64) {
_ = os.Remove(s.filename(id))
_ = os.RemoveAll(s.tempDir(id))
}
func (s *Storage) filename(id int64) string {
return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}
func (s *Storage) tempDir(id int64) string {
return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}
func (s *Storage) tempName(id, offset int64) string {
return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}
func (s *Storage) tempNames(id int64) ([]string, error) {
dir := s.tempDir(id)
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var names []string
for _, v := range files {
if !v.IsDir() {
names = append(names, filepath.Join(dir, v.Name()))
}
}
return names, nil
}

View file

@ -0,0 +1,100 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"fmt"
"net"
"net/http"
"strconv"
"strings"
"sync"
"github.com/go-chi/render"
"xorm.io/xorm"
)
func responseJson(w http.ResponseWriter, r *http.Request, code int, v ...any) {
render.Status(r, code)
if len(v) == 0 || v[0] == nil {
render.JSON(w, r, struct{}{})
} else if err, ok := v[0].(error); ok {
logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
render.JSON(w, r, map[string]any{
"error": err.Error(),
})
} else {
render.JSON(w, r, v[0])
}
}
func parseContentRange(s string) (int64, int64, error) {
// support the format like "bytes 11-22/*" only
s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
s1, s2, _ := strings.Cut(s, "-")
start, err := strconv.ParseInt(s1, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
stop, err := strconv.ParseInt(s2, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
return start, stop, nil
}
func getOutboundIP() (net.IP, error) {
// FIXME: It makes more sense to use the gateway IP address of container network
if conn, err := net.Dial("udp", "8.8.8.8:80"); err == nil {
defer conn.Close()
return conn.LocalAddr().(*net.UDPAddr).IP, nil
}
if ifaces, err := net.Interfaces(); err == nil {
for _, i := range ifaces {
if addrs, err := i.Addrs(); err == nil {
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsGlobalUnicast() {
return ip, nil
}
}
}
}
}
return nil, fmt.Errorf("no outbound IP address found")
}
// engine is a wrapper of *xorm.Engine, with a lock.
// To avoid races with sqlite, we don't care about performance here.
type engine struct {
e *xorm.Engine
m sync.Mutex
}
func (e *engine) Exec(f func(*xorm.Session) error) error {
e.m.Lock()
defer e.m.Unlock()
sess := e.e.NewSession()
defer sess.Close()
return f(sess)
}
func (e *engine) ExecBool(f func(*xorm.Session) (bool, error)) (bool, error) {
e.m.Lock()
defer e.m.Unlock()
sess := e.e.NewSession()
defer sess.Close()
return f(sess)
}

View file

@ -1,69 +0,0 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package cmd
import (
"context"
"fmt"
"os"
"os/signal"
"gitea.com/gitea/act_runner/internal/pkg/config"
"github.com/nektos/act/pkg/artifactcache"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
type cacheServerArgs struct {
Dir string
Host string
Port uint16
}
func runCacheServer(ctx context.Context, configFile *string, cacheArgs *cacheServerArgs) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
cfg, err := config.LoadDefault(*configFile)
if err != nil {
return fmt.Errorf("invalid configuration: %w", err)
}
initLogging(cfg)
var (
dir = cfg.Cache.Dir
host = cfg.Cache.Host
port = cfg.Cache.Port
)
// cacheArgs has higher priority
if cacheArgs.Dir != "" {
dir = cacheArgs.Dir
}
if cacheArgs.Host != "" {
host = cacheArgs.Host
}
if cacheArgs.Port != 0 {
port = cacheArgs.Port
}
cacheHandler, err := artifactcache.StartHandler(
dir,
host,
port,
log.StandardLogger().WithField("module", "cache_request"),
)
if err != nil {
return err
}
log.Infof("cache server is listening on %v", cacheHandler.ExternalURL())
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
<-c
return nil
}
}

View file

@ -17,8 +17,8 @@ import (
func Execute(ctx context.Context) {
// ./act_runner
rootCmd := &cobra.Command{
Use: "forgejo-runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
Short: "Run Forgejo Actions locally by specifying the event name (e.g. `push`) or an action name directly.",
Use: "act_runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
Args: cobra.MaximumNArgs(1),
Version: ver.Version(),
SilenceUsage: true,
@ -35,14 +35,12 @@ func Execute(ctx context.Context) {
RunE: runRegister(ctx, &regArgs, &configFile), // must use a pointer to regArgs
}
registerCmd.Flags().BoolVar(&regArgs.NoInteractive, "no-interactive", false, "Disable interactive mode")
registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "Forgejo instance address")
registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "Gitea instance address")
registerCmd.Flags().StringVar(&regArgs.Token, "token", "", "Runner token")
registerCmd.Flags().StringVar(&regArgs.RunnerName, "name", "", "Runner name")
registerCmd.Flags().StringVar(&regArgs.Labels, "labels", "", "Runner tags, comma separated")
rootCmd.AddCommand(registerCmd)
rootCmd.AddCommand(createRunnerFileCmd(ctx, &configFile))
// ./act_runner daemon
daemonCmd := &cobra.Command{
Use: "daemon",
@ -65,19 +63,6 @@ func Execute(ctx context.Context) {
},
})
// ./act_runner cache-server
var cacheArgs cacheServerArgs
cacheCmd := &cobra.Command{
Use: "cache-server",
Short: "Start a cache server for the cache action",
Args: cobra.MaximumNArgs(0),
RunE: runCacheServer(ctx, &configFile, &cacheArgs),
}
cacheCmd.Flags().StringVarP(&cacheArgs.Dir, "dir", "d", "", "Cache directory")
cacheCmd.Flags().StringVarP(&cacheArgs.Host, "host", "s", "", "Host of the cache server")
cacheCmd.Flags().Uint16VarP(&cacheArgs.Port, "port", "p", 0, "Port of the cache server")
rootCmd.AddCommand(cacheCmd)
// hide completion command
rootCmd.CompletionOptions.HiddenDefaultCmd = true

View file

@ -1,164 +0,0 @@
// SPDX-License-Identifier: MIT
package cmd
import (
"context"
"encoding/hex"
"fmt"
"os"
pingv1 "code.gitea.io/actions-proto-go/ping/v1"
"connectrpc.com/connect"
gouuid "github.com/google/uuid"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gitea.com/gitea/act_runner/internal/app/run"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
type createRunnerFileArgs struct {
Connect bool
InstanceAddr string
Secret string
Name string
}
func createRunnerFileCmd(ctx context.Context, configFile *string) *cobra.Command {
var argsVar createRunnerFileArgs
cmd := &cobra.Command{
Use: "create-runner-file",
Short: "Create a runner file using a shared secret used to pre-register the runner on the Forgejo instance",
Args: cobra.MaximumNArgs(0),
RunE: runCreateRunnerFile(ctx, &argsVar, configFile),
}
cmd.Flags().BoolVar(&argsVar.Connect, "connect", false, "tries to connect to the instance using the secret (Forgejo v1.21 instance or greater)")
cmd.Flags().StringVar(&argsVar.InstanceAddr, "instance", "", "Forgejo instance address")
cmd.MarkFlagRequired("instance")
cmd.Flags().StringVar(&argsVar.Secret, "secret", "", "secret shared with the Forgejo instance via forgejo-cli actions register")
cmd.MarkFlagRequired("secret")
cmd.Flags().StringVar(&argsVar.Name, "name", "", "Runner name")
return cmd
}
// must be exactly the same as forgejo/models/actions/forgejo.go
func uuidFromSecret(secret string) (string, error) {
uuid, err := gouuid.FromBytes([]byte(secret[:16]))
if err != nil {
return "", fmt.Errorf("gouuid.FromBytes %v", err)
}
return uuid.String(), nil
}
// should be exactly the same as forgejo/cmd/forgejo/actions.go
func validateSecret(secret string) error {
secretLen := len(secret)
if secretLen != 40 {
return fmt.Errorf("the secret must be exactly 40 characters long, not %d", secretLen)
}
if _, err := hex.DecodeString(secret); err != nil {
return fmt.Errorf("the secret must be an hexadecimal string: %w", err)
}
return nil
}
func ping(cfg *config.Config, reg *config.Registration) error {
// initial http client
cli := client.New(
reg.Address,
cfg.Runner.Insecure,
"",
"",
ver.Version(),
)
_, err := cli.Ping(context.Background(), connect.NewRequest(&pingv1.PingRequest{
Data: reg.UUID,
}))
if err != nil {
return fmt.Errorf("ping %s failed %w", reg.Address, err)
}
return nil
}
func runCreateRunnerFile(ctx context.Context, args *createRunnerFileArgs, configFile *string) func(cmd *cobra.Command, args []string) error {
return func(*cobra.Command, []string) error {
log.SetLevel(log.DebugLevel)
log.Info("Creating runner file")
//
// Prepare the registration data
//
cfg, err := config.LoadDefault(*configFile)
if err != nil {
return fmt.Errorf("invalid configuration: %w", err)
}
if err := validateSecret(args.Secret); err != nil {
return err
}
uuid, err := uuidFromSecret(args.Secret)
if err != nil {
return err
}
name := args.Name
if name == "" {
name, _ = os.Hostname()
log.Infof("Runner name is empty, use hostname '%s'.", name)
}
reg := &config.Registration{
Name: name,
UUID: uuid,
Token: args.Secret,
Address: args.InstanceAddr,
}
//
// Verify the Forgejo instance is reachable
//
if err := ping(cfg, reg); err != nil {
return err
}
//
// Save the registration file
//
if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil {
return fmt.Errorf("failed to save runner config to %s: %w", cfg.Runner.File, err)
}
//
// Verify the secret works
//
if args.Connect {
cli := client.New(
reg.Address,
cfg.Runner.Insecure,
reg.UUID,
reg.Token,
ver.Version(),
)
runner := run.NewRunner(cfg, reg, cli)
resp, err := runner.Declare(ctx, cfg.Runner.Labels)
if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented {
log.Warn("Cannot verify the connection because the Forgejo instance is lower than v1.21")
} else if err != nil {
log.WithError(err).Error("fail to invoke Declare")
return err
} else {
log.Infof("connection successful: %s, with version: %s, with labels: %v",
resp.Msg.Runner.Name, resp.Msg.Runner.Version, resp.Msg.Runner.Labels)
}
}
return nil
}
}
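Put together, pre-registering a runner from the command line looks roughly like this (a sketch; the instance URL and the 40-character hexadecimal secret are hypothetical placeholders):
```sh
forgejo-runner create-runner-file \
  --instance https://forgejo.example.com \
  --secret 7c31591e8b67225a116d4a4519ea8e507e08f71f \
  --name my-runner \
  --connect
```
On success the registration is written to the `.runner` file configured in `config.yml`, and `--connect` additionally verifies the secret against the instance.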

View file

@ -1,118 +0,0 @@
// SPDX-License-Identifier: MIT
package cmd
import (
"bytes"
"context"
"os"
"testing"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"connectrpc.com/connect"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/ver"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
func executeCommand(ctx context.Context, cmd *cobra.Command, args ...string) (string, error) {
buf := new(bytes.Buffer)
cmd.SetOut(buf)
cmd.SetErr(buf)
cmd.SetArgs(args)
err := cmd.ExecuteContext(ctx)
return buf.String(), err
}
func Test_createRunnerFileCmd(t *testing.T) {
configFile := "config.yml"
ctx := context.Background()
cmd := createRunnerFileCmd(ctx, &configFile)
output, err := executeCommand(ctx, cmd)
assert.ErrorContains(t, err, `required flag(s) "instance", "secret" not set`)
assert.Contains(t, output, "Usage:")
}
func Test_validateSecret(t *testing.T) {
assert.ErrorContains(t, validateSecret("abc"), "exactly 40 characters")
assert.ErrorContains(t, validateSecret("ZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), "must be an hexadecimal")
}
func Test_uuidFromSecret(t *testing.T) {
uuid, err := uuidFromSecret("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
assert.NoError(t, err)
assert.EqualValues(t, uuid, "41414141-4141-4141-4141-414141414141")
}
func Test_ping(t *testing.T) {
cfg := &config.Config{}
address := os.Getenv("FORGEJO_URL")
if address == "" {
address = "https://code.forgejo.org"
}
reg := &config.Registration{
Address: address,
UUID: "create-runner-file_test.go",
}
assert.NoError(t, ping(cfg, reg))
}
func Test_runCreateRunnerFile(t *testing.T) {
//
// Set the .runner file to be in a temporary directory
//
dir := t.TempDir()
configFile := dir + "/config.yml"
runnerFile := dir + "/.runner"
cfg, err := config.LoadDefault("")
cfg.Runner.File = runnerFile
yamlData, err := yaml.Marshal(cfg)
assert.NoError(t, err)
assert.NoError(t, os.WriteFile(configFile, yamlData, 0o666))
instance, has := os.LookupEnv("FORGEJO_URL")
if !has {
instance = "https://code.forgejo.org"
}
secret, has := os.LookupEnv("FORGEJO_RUNNER_SECRET")
assert.True(t, has)
name := "testrunner"
//
// Run create-runner-file
//
ctx := context.Background()
cmd := createRunnerFileCmd(ctx, &configFile)
output, err := executeCommand(ctx, cmd, "--connect", "--secret", secret, "--instance", instance, "--name", name)
assert.NoError(t, err)
assert.EqualValues(t, "", output)
//
// Read back the runner file and verify its content
//
reg, err := config.LoadRegistration(runnerFile)
assert.NoError(t, err)
assert.EqualValues(t, secret, reg.Token)
assert.EqualValues(t, instance, reg.Address)
//
// Verify that fetching a task successfully returns there is
// no task for this runner
//
cli := client.New(
reg.Address,
cfg.Runner.Insecure,
reg.UUID,
reg.Token,
ver.Version(),
)
resp, err := cli.FetchTask(ctx, connect.NewRequest(&runnerv1.FetchTaskRequest{}))
assert.NoError(t, err)
assert.Nil(t, resp.Msg.Task)
}

View file

@ -7,13 +7,7 @@ import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"connectrpc.com/connect"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -29,13 +23,14 @@ import (
func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
log.Infoln("Starting runner daemon")
cfg, err := config.LoadDefault(*configFile)
if err != nil {
return fmt.Errorf("invalid configuration: %w", err)
}
initLogging(cfg)
log.Infoln("Starting runner daemon")
reg, err := config.LoadRegistration(cfg.Runner.File)
if os.IsNotExist(err) {
@ -45,15 +40,8 @@ func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command,
return fmt.Errorf("failed to load registration file: %w", err)
}
cfg.Tune(reg.Address)
lbls := reg.Labels
if len(cfg.Runner.Labels) > 0 {
lbls = cfg.Runner.Labels
}
ls := labels.Labels{}
for _, l := range lbls {
for _, l := range reg.Labels {
label, err := labels.Parse(l)
if err != nil {
log.WithError(err).Warnf("ignored invalid label %q", l)
@ -66,28 +54,9 @@ func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command,
}
if ls.RequireDocker() {
dockerSocketPath, err := getDockerSocketPath(cfg.Container.DockerHost)
if err != nil {
if err := envcheck.CheckIfDockerRunning(ctx); err != nil {
return err
}
if err := envcheck.CheckIfDockerRunning(ctx, dockerSocketPath); err != nil {
return err
}
// if dockerSocketPath passes the check, override DOCKER_HOST with dockerSocketPath
os.Setenv("DOCKER_HOST", dockerSocketPath)
// empty cfg.Container.DockerHost means act_runner need to find an available docker host automatically
// and assign the path to cfg.Container.DockerHost
if cfg.Container.DockerHost == "" {
cfg.Container.DockerHost = dockerSocketPath
}
// check the scheme, if the scheme is not npipe or unix
// set cfg.Container.DockerHost to "-" because it can't be mounted to the job container
if protoIndex := strings.Index(cfg.Container.DockerHost, "://"); protoIndex != -1 {
scheme := cfg.Container.DockerHost[:protoIndex]
if !strings.EqualFold(scheme, "npipe") && !strings.EqualFold(scheme, "unix") {
cfg.Container.DockerHost = "-"
}
}
}
cli := client.New(
@ -99,39 +68,10 @@ func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command,
)
runner := run.NewRunner(cfg, reg, cli)
// declare the labels of the runner before fetching tasks
resp, err := runner.Declare(ctx, ls.Names())
if err != nil && connect.CodeOf(err) == connect.CodeUnimplemented {
// Gitea instance is older version. skip declare step.
log.Warn("Because the Forgejo instance is an old version, skipping declaring the labels and version.")
} else if err != nil {
log.WithError(err).Error("fail to invoke Declare")
return err
} else {
log.Infof("runner: %s, with version: %s, with labels: %v, declared successfully",
resp.Msg.Runner.Name, resp.Msg.Runner.Version, resp.Msg.Runner.Labels)
// if declared successfully, override the labels in the.runner file with valid labels in the config file (if specified)
runner.Update(ctx, ls)
reg.Labels = ls.ToStrings()
if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil {
return fmt.Errorf("failed to save runner config: %w", err)
}
}
poller := poll.New(cfg, cli, runner)
go poller.Poll()
poller.Poll(ctx)
<-ctx.Done()
log.Infof("runner: %s shutdown initiated, waiting [runner].shutdown_timeout=%s for running jobs to complete before shutting down", resp.Msg.Runner.Name, cfg.Runner.ShutdownTimeout)
ctx, cancel := context.WithTimeout(context.Background(), cfg.Runner.ShutdownTimeout)
defer cancel()
err = poller.Shutdown(ctx)
if err != nil {
log.Warnf("runner: %s cancelled in progress jobs during shutdown", resp.Msg.Runner.Name)
}
return nil
}
}
@ -139,11 +79,10 @@ func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command,
// initLogging setup the global logrus logger.
func initLogging(cfg *config.Config) {
isTerm := isatty.IsTerminal(os.Stdout.Fd())
format := &log.TextFormatter{
log.SetFormatter(&log.TextFormatter{
DisableColors: !isTerm,
FullTimestamp: true,
}
log.SetFormatter(format)
})
if l := cfg.Log.Level; l != "" {
level, err := log.ParseLevel(l)
@ -151,58 +90,9 @@ func initLogging(cfg *config.Config) {
log.WithError(err).
Errorf("invalid log level: %q", l)
}
// debug level
if level == log.DebugLevel {
log.SetReportCaller(true)
format.CallerPrettyfier = func(f *runtime.Frame) (string, string) {
// get function name
s := strings.Split(f.Function, ".")
funcname := "[" + s[len(s)-1] + "]"
// get file name and line number
_, filename := path.Split(f.File)
filename = "[" + filename + ":" + strconv.Itoa(f.Line) + "]"
return funcname, filename
}
log.SetFormatter(format)
}
if log.GetLevel() != level {
log.Infof("log level changed to %v", level)
log.SetLevel(level)
}
}
}
var commonSocketPaths = []string{
"/var/run/docker.sock",
"/run/podman/podman.sock",
"$HOME/.colima/docker.sock",
"$XDG_RUNTIME_DIR/docker.sock",
"$XDG_RUNTIME_DIR/podman/podman.sock",
`\\.\pipe\docker_engine`,
"$HOME/.docker/run/docker.sock",
}
func getDockerSocketPath(configDockerHost string) (string, error) {
// a `-` means don't mount the docker socket to job containers
if configDockerHost != "" && configDockerHost != "-" {
return configDockerHost, nil
}
socket, found := os.LookupEnv("DOCKER_HOST")
if found {
return socket, nil
}
for _, p := range commonSocketPaths {
if _, err := os.Lstat(os.ExpandEnv(p)); err == nil {
if strings.HasPrefix(p, `\\.\`) {
return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), nil
}
return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), nil
}
}
return "", fmt.Errorf("daemon Docker Engine socket not found and docker_host config was invalid")
}

View file

@ -13,9 +13,7 @@ import (
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/joho/godotenv"
"github.com/nektos/act/pkg/artifactcache"
"github.com/nektos/act/pkg/artifacts"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
@ -23,6 +21,8 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/term"
"gitea.com/gitea/act_runner/internal/app/artifactcache"
)
type executeArgs struct {
@ -39,7 +39,7 @@ type executeArgs struct {
envs []string
envfile string
secrets []string
defaultActionsURL string
defaultActionsUrl string
insecureSecrets bool
privileged bool
usernsMode string
@ -57,9 +57,6 @@ type executeArgs struct {
dryrun bool
image string
cacheHandler *artifactcache.Handler
network string
enableIPv6 bool
githubInstance string
}
// WorkflowsPath returns path to workflow file(s)
@ -253,7 +250,7 @@ func runExecList(ctx context.Context, planner model.WorkflowPlanner, execArgs *e
var filterPlan *model.Plan
// Determine the event name to be filtered
var filterEventName string
var filterEventName string = ""
if len(execArgs.event) > 0 {
log.Infof("Using chosed event for filtering: %s", execArgs.event)
@ -290,7 +287,7 @@ func runExecList(ctx context.Context, planner model.WorkflowPlanner, execArgs *e
}
}
_ = printList(filterPlan)
printList(filterPlan)
return nil
}
@ -317,7 +314,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
if len(execArgs.event) > 0 {
log.Infof("Using chosed event for filtering: %s", execArgs.event)
eventName = execArgs.event
eventName = args[0]
} else if len(events) == 1 && len(events[0]) > 0 {
log.Infof("Using the only detected workflow event: %s", events[0])
eventName = events[0]
@ -352,31 +349,13 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
}
// init a cache server
handler, err := artifactcache.StartHandler("", "", 0, log.StandardLogger().WithField("module", "cache_request"))
handler, err := artifactcache.StartHandler("", "", 0)
if err != nil {
return err
}
log.Infof("cache handler listens on: %v", handler.ExternalURL())
execArgs.cacheHandler = handler
if len(execArgs.artifactServerAddr) == 0 {
ip := common.GetOutboundIP()
if ip == nil {
return fmt.Errorf("unable to determine outbound IP address")
}
execArgs.artifactServerAddr = ip.String()
}
if len(execArgs.artifactServerPath) == 0 {
tempDir, err := os.MkdirTemp("", "gitea-act-")
if err != nil {
fmt.Println(err)
}
defer os.RemoveAll(tempDir)
execArgs.artifactServerPath = tempDir
}
// run the plan
config := &runner.Config{
Workdir: execArgs.Workdir(),
@ -394,46 +373,47 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
ContainerArchitecture: execArgs.containerArchitecture,
ContainerDaemonSocket: execArgs.containerDaemonSocket,
UseGitIgnore: execArgs.useGitIgnore,
GitHubInstance: execArgs.githubInstance,
ContainerCapAdd: execArgs.containerCapAdd,
ContainerCapDrop: execArgs.containerCapDrop,
ContainerOptions: execArgs.containerOptions,
AutoRemove: true,
ArtifactServerPath: execArgs.artifactServerPath,
ArtifactServerPort: execArgs.artifactServerPort,
ArtifactServerAddr: execArgs.artifactServerAddr,
NoSkipCheckout: execArgs.noSkipCheckout,
// PresetGitHubContext: preset,
// EventJSON: string(eventJSON),
ContainerNamePrefix: fmt.Sprintf("FORGEJO-ACTIONS-TASK-%s", eventName),
ContainerMaxLifetime: maxLifetime,
ContainerNetworkMode: container.NetworkMode(execArgs.network),
ContainerNetworkEnableIPv6: execArgs.enableIPv6,
DefaultActionInstance: execArgs.defaultActionsURL,
// GitHubInstance: t.client.Address(),
ContainerCapAdd: execArgs.containerCapAdd,
ContainerCapDrop: execArgs.containerCapDrop,
ContainerOptions: execArgs.containerOptions,
AutoRemove: true,
ArtifactServerPath: execArgs.artifactServerPath,
ArtifactServerPort: execArgs.artifactServerPort,
NoSkipCheckout: execArgs.noSkipCheckout,
// PresetGitHubContext: preset,
// EventJSON: string(eventJSON),
ContainerNamePrefix: fmt.Sprintf("GITEA-ACTIONS-TASK-%s", eventName),
ContainerMaxLifetime: maxLifetime,
ContainerNetworkMode: "bridge",
DefaultActionInstance: execArgs.defaultActionsUrl,
PlatformPicker: func(_ []string) string {
return execArgs.image
},
ValidVolumes: []string{"**"}, // All volumes are allowed for `exec` command
}
config.Env["ACT_EXEC"] = "true"
if t := config.Secrets["GITEA_TOKEN"]; t != "" {
config.Token = t
} else if t := config.Secrets["GITHUB_TOKEN"]; t != "" {
config.Token = t
}
if !execArgs.debug {
logLevel := log.InfoLevel
config.JobLoggerLevel = &logLevel
}
// TODO: handle log level config
// waiting https://gitea.com/gitea/act/pulls/19
// if !execArgs.debug {
// logLevel := log.Level(log.InfoLevel)
// config.JobLoggerLevel = &logLevel
// }
r, err := runner.New(config)
if err != nil {
return err
}
if len(execArgs.artifactServerPath) == 0 {
tempDir, err := os.MkdirTemp("", "gitea-act-")
if err != nil {
fmt.Println(err)
}
defer os.RemoveAll(tempDir)
execArgs.artifactServerPath = tempDir
}
artifactCancel := artifacts.Serve(ctx, execArgs.artifactServerPath, execArgs.artifactServerAddr, execArgs.artifactServerPort)
log.Debugf("artifacts server started at %s:%s", execArgs.artifactServerPath, execArgs.artifactServerPort)
@ -480,16 +460,12 @@ func loadExecCmd(ctx context.Context) *cobra.Command {
execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")
execCmd.Flags().StringVarP(&execArg.containerOptions, "container-opts", "", "", "container options")
execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPath, "artifact-server-path", "", ".", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
execCmd.PersistentFlags().StringVarP(&execArg.artifactServerAddr, "artifact-server-addr", "", "", "Defines the address where the artifact server listens")
execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).")
execCmd.PersistentFlags().StringVarP(&execArg.defaultActionsURL, "default-actions-url", "", "https://code.forgejo.org", "Defines the default base url of the action.")
execCmd.PersistentFlags().StringVarP(&execArg.defaultActionsUrl, "default-actions-url", "", "https://code.forgejo.org", "Defines the default url of action instance.")
execCmd.PersistentFlags().BoolVarP(&execArg.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
execCmd.PersistentFlags().BoolVarP(&execArg.debug, "debug", "d", false, "enable debug log")
execCmd.PersistentFlags().BoolVarP(&execArg.dryrun, "dryrun", "n", false, "dryrun mode")
execCmd.PersistentFlags().StringVarP(&execArg.image, "image", "i", "node:20-bullseye", "Docker image to use. Use \"-self-hosted\" to run directly on the host.")
execCmd.PersistentFlags().StringVarP(&execArg.network, "network", "", "", "Specify the network to which the container will connect")
execCmd.PersistentFlags().BoolVarP(&execArg.enableIPv6, "enable-ipv6", "6", false, "Create network with IPv6 enabled.")
execCmd.PersistentFlags().StringVarP(&execArg.githubInstance, "gitea-instance", "", "", "Gitea instance to use.")
execCmd.PersistentFlags().StringVarP(&execArg.image, "image", "i", "node:16-bullseye", "docker image to use")
return execCmd
}

View file

@ -15,7 +15,7 @@ import (
pingv1 "code.gitea.io/actions-proto-go/ping/v1"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"connectrpc.com/connect"
"github.com/bufbuild/connect-go"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -47,12 +47,12 @@ func runRegister(ctx context.Context, regArgs *registerArgs, configFile *string)
}
if regArgs.NoInteractive {
if err := registerNoInteractive(ctx, *configFile, regArgs); err != nil {
if err := registerNoInteractive(*configFile, regArgs); err != nil {
return err
}
} else {
go func() {
if err := registerInteractive(ctx, *configFile); err != nil {
if err := registerInteractive(*configFile); err != nil {
log.Fatal(err)
return
}
@ -85,20 +85,23 @@ const (
StageInputInstance
StageInputToken
StageInputRunnerName
StageInputLabels
StageInputCustomLabels
StageWaitingForRegistration
StageExit
)
var defaultLabels = []string{
"docker:docker://node:20-bullseye",
"ubuntu-latest:docker://node:16-bullseye",
"ubuntu-22.04:docker://node:16-bullseye", // There's no node:16-bookworm yet
"ubuntu-20.04:docker://node:16-bullseye",
"ubuntu-18.04:docker://node:16-buster",
}
type registerInputs struct {
InstanceAddr string
Token string
RunnerName string
Labels []string
CustomLabels []string
}
func (r *registerInputs) validate() error {
@ -108,8 +111,8 @@ func (r *registerInputs) validate() error {
if r.Token == "" {
return fmt.Errorf("token is empty")
}
if len(r.Labels) > 0 {
return validateLabels(r.Labels)
if len(r.CustomLabels) > 0 {
return validateLabels(r.CustomLabels)
}
return nil
}
@ -123,7 +126,7 @@ func validateLabels(ls []string) error {
return nil
}
func (r *registerInputs) assignToNext(stage registerStage, value string, cfg *config.Config) registerStage {
func (r *registerInputs) assignToNext(stage registerStage, value string) registerStage {
// must set instance address and token.
// if empty, keep current stage.
if stage == StageInputInstance || stage == StageInputToken {
@ -151,40 +154,23 @@ func (r *registerInputs) assignToNext(stage registerStage, value string, cfg *co
return StageInputRunnerName
case StageInputRunnerName:
r.RunnerName = value
// if there are some labels configured in config file, skip input labels stage
if len(cfg.Runner.Labels) > 0 {
ls := make([]string, 0, len(cfg.Runner.Labels))
for _, l := range cfg.Runner.Labels {
_, err := labels.Parse(l)
if err != nil {
log.WithError(err).Warnf("ignored invalid label %q", l)
continue
}
ls = append(ls, l)
}
if len(ls) == 0 {
log.Warn("no valid labels configured in config file, runner may not be able to pick up jobs")
}
r.Labels = ls
return StageWaitingForRegistration
}
return StageInputLabels
case StageInputLabels:
r.Labels = defaultLabels
return StageInputCustomLabels
case StageInputCustomLabels:
r.CustomLabels = defaultLabels
if value != "" {
r.Labels = strings.Split(value, ",")
r.CustomLabels = strings.Split(value, ",")
}
if validateLabels(r.Labels) != nil {
log.Infoln("Invalid labels, please input again, leave blank to use the default labels (for example, ubuntu-20.04:docker://node:20-bookworm,ubuntu-18.04:docker://node:20-bookworm)")
return StageInputLabels
if validateLabels(r.CustomLabels) != nil {
log.Infoln("Invalid labels, please input again, leave blank to use the default labels (for example, ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,linux_arm:host)")
return StageInputCustomLabels
}
return StageWaitingForRegistration
}
return StageUnknown
}
func registerInteractive(ctx context.Context, configFile string) error {
func registerInteractive(configFile string) error {
var (
reader = bufio.NewReader(os.Stdin)
stage = StageInputInstance
@ -206,14 +192,15 @@ func registerInteractive(ctx context.Context, configFile string) error {
if err != nil {
return err
}
stage = inputs.assignToNext(stage, strings.TrimSpace(cmdString), cfg)
stage = inputs.assignToNext(stage, strings.TrimSpace(cmdString))
if stage == StageWaitingForRegistration {
log.Infof("Registering runner, name=%s, instance=%s, labels=%v.", inputs.RunnerName, inputs.InstanceAddr, inputs.Labels)
if err := doRegister(ctx, cfg, inputs); err != nil {
return fmt.Errorf("Failed to register runner: %w", err)
log.Infof("Registering runner, name=%s, instance=%s, labels=%v.", inputs.RunnerName, inputs.InstanceAddr, inputs.CustomLabels)
if err := doRegister(cfg, inputs); err != nil {
log.Errorf("Failed to register runner: %v", err)
} else {
log.Infof("Runner registered successfully.")
}
log.Infof("Runner registered successfully.")
return nil
}
@ -233,20 +220,20 @@ func printStageHelp(stage registerStage) {
case StageOverwriteLocalConfig:
log.Infoln("Runner is already registered, overwrite local config? [y/N]")
case StageInputInstance:
log.Infoln("Enter the Forgejo instance URL (for example, https://next.forgejo.org/):")
log.Infoln("Enter the Gitea instance URL (for example, https://gitea.com/):")
case StageInputToken:
log.Infoln("Enter the runner token:")
case StageInputRunnerName:
hostname, _ := os.Hostname()
log.Infof("Enter the runner name (if set empty, use hostname: %s):\n", hostname)
case StageInputLabels:
log.Infoln("Enter the runner labels, leave blank to use the default labels (comma-separated, for example, ubuntu-20.04:docker://node:20-bookworm,ubuntu-18.04:docker://node:20-bookworm):")
case StageInputCustomLabels:
log.Infoln("Enter the runner labels, leave blank to use the default labels (comma-separated, for example, ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,linux_arm:host):")
case StageWaitingForRegistration:
log.Infoln("Waiting for registration...")
}
}
func registerNoInteractive(ctx context.Context, configFile string, regArgs *registerArgs) error {
func registerNoInteractive(configFile string, regArgs *registerArgs) error {
cfg, err := config.LoadDefault(configFile)
if err != nil {
return err
@ -255,21 +242,12 @@ func registerNoInteractive(ctx context.Context, configFile string, regArgs *regi
InstanceAddr: regArgs.InstanceAddr,
Token: regArgs.Token,
RunnerName: regArgs.RunnerName,
Labels: defaultLabels,
CustomLabels: defaultLabels,
}
regArgs.Labels = strings.TrimSpace(regArgs.Labels)
// command line flag.
if regArgs.Labels != "" {
inputs.Labels = strings.Split(regArgs.Labels, ",")
inputs.CustomLabels = strings.Split(regArgs.Labels, ",")
}
// specify labels in config file.
if len(cfg.Runner.Labels) > 0 {
if regArgs.Labels != "" {
log.Warn("Labels from command will be ignored, use labels defined in config file.")
}
inputs.Labels = cfg.Runner.Labels
}
if inputs.RunnerName == "" {
inputs.RunnerName, _ = os.Hostname()
log.Infof("Runner name is empty, use hostname '%s'.", inputs.RunnerName)
@ -278,14 +256,17 @@ func registerNoInteractive(ctx context.Context, configFile string, regArgs *regi
log.WithError(err).Errorf("Invalid input, please re-run act command.")
return nil
}
if err := doRegister(ctx, cfg, inputs); err != nil {
return fmt.Errorf("Failed to register runner: %w", err)
if err := doRegister(cfg, inputs); err != nil {
log.Errorf("Failed to register runner: %v", err)
return nil
}
log.Infof("Runner registered successfully.")
return nil
}
func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs) error {
func doRegister(cfg *config.Config, inputs *registerInputs) error {
ctx := context.Background()
// initial http client
cli := client.New(
inputs.InstanceAddr,
@ -301,7 +282,7 @@ func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs)
}))
select {
case <-ctx.Done():
return ctx.Err()
return nil
default:
}
if ctx.Err() != nil {
@ -309,11 +290,11 @@ func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs)
}
if err != nil {
log.WithError(err).
Errorln("Cannot ping the Forgejo instance server")
Errorln("Cannot ping the Gitea instance server")
// TODO: if ping failed, retry or exit
time.Sleep(time.Second)
} else {
log.Debugln("Successfully pinged the Forgejo instance server")
log.Debugln("Successfully pinged the Gitea instance server")
break
}
}
@ -322,7 +303,7 @@ func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs)
Name: inputs.RunnerName,
Token: inputs.Token,
Address: inputs.InstanceAddr,
Labels: inputs.Labels,
Labels: inputs.CustomLabels,
}
ls := make([]string, len(reg.Labels))
@ -334,9 +315,7 @@ func doRegister(ctx context.Context, cfg *config.Config, inputs *registerInputs)
resp, err := cli.Register(ctx, connect.NewRequest(&runnerv1.RegisterRequest{
Name: reg.Name,
Token: reg.Token,
Version: ver.Version(),
AgentLabels: ls, // Could be removed after Gitea 1.20
Labels: ls,
AgentLabels: ls,
}))
if err != nil {
log.WithError(err).Error("poller: cannot register new runner")


@ -6,12 +6,10 @@ package poll
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"connectrpc.com/connect"
"github.com/bufbuild/connect-go"
log "github.com/sirupsen/logrus"
"golang.org/x/time/rate"
@ -20,148 +18,64 @@ import (
"gitea.com/gitea/act_runner/internal/pkg/config"
)
const PollerID = "PollerID"
type Poller interface {
Poll()
Shutdown(ctx context.Context) error
type Poller struct {
client client.Client
runner *run.Runner
cfg *config.Config
}
type poller struct {
client client.Client
runner run.RunnerInterface
cfg *config.Config
tasksVersion atomic.Int64 // tasksVersion stores the version of the last task fetched from the Gitea instance.
pollingCtx context.Context
shutdownPolling context.CancelFunc
jobsCtx context.Context
shutdownJobs context.CancelFunc
done chan any
func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
return &Poller{
client: client,
runner: runner,
cfg: cfg,
}
}
func New(cfg *config.Config, client client.Client, runner run.RunnerInterface) Poller {
return (&poller{}).init(cfg, client, runner)
}
func (p *poller) init(cfg *config.Config, client client.Client, runner run.RunnerInterface) Poller {
pollingCtx, shutdownPolling := context.WithCancel(context.Background())
jobsCtx, shutdownJobs := context.WithCancel(context.Background())
done := make(chan any)
p.client = client
p.runner = runner
p.cfg = cfg
p.pollingCtx = pollingCtx
p.shutdownPolling = shutdownPolling
p.jobsCtx = jobsCtx
p.shutdownJobs = shutdownJobs
p.done = done
return p
}
func (p *poller) Poll() {
func (p *Poller) Poll(ctx context.Context) {
limiter := rate.NewLimiter(rate.Every(p.cfg.Runner.FetchInterval), 1)
wg := &sync.WaitGroup{}
for i := 0; i < p.cfg.Runner.Capacity; i++ {
wg.Add(1)
go p.poll(i, wg, limiter)
go p.poll(ctx, wg, limiter)
}
wg.Wait()
// signal the poller is finished
close(p.done)
}
func (p *poller) Shutdown(ctx context.Context) error {
p.shutdownPolling()
select {
case <-p.done:
log.Trace("all jobs are complete")
return nil
case <-ctx.Done():
log.Trace("forcing the jobs to shutdown")
p.shutdownJobs()
<-p.done
log.Trace("all jobs have been shutdown")
return ctx.Err()
}
}
func (p *poller) poll(id int, wg *sync.WaitGroup, limiter *rate.Limiter) {
log.Infof("[poller %d] launched", id)
func (p *Poller) poll(ctx context.Context, wg *sync.WaitGroup, limiter *rate.Limiter) {
defer wg.Done()
for {
if err := limiter.Wait(p.pollingCtx); err != nil {
log.Infof("[poller %d] shutdown", id)
if err := limiter.Wait(ctx); err != nil {
if ctx.Err() != nil {
log.WithError(err).Debug("limiter wait failed")
}
return
}
task, ok := p.fetchTask(p.pollingCtx)
task, ok := p.fetchTask(ctx)
if !ok {
continue
}
p.runTaskWithRecover(p.jobsCtx, task)
}
}
func (p *poller) runTaskWithRecover(ctx context.Context, task *runnerv1.Task) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("panic: %v", r)
log.WithError(err).Error("panic in runTaskWithRecover")
if err := p.runner.Run(ctx, task); err != nil {
log.WithError(err).Error("failed to run task")
}
}()
if err := p.runner.Run(ctx, task); err != nil {
log.WithError(err).Error("failed to run task")
}
}
func (p *poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
reqCtx, cancel := context.WithTimeout(ctx, p.cfg.Runner.FetchTimeout)
defer cancel()
// Load the version value that was in the cache when the request was sent.
v := p.tasksVersion.Load()
resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{
TasksVersion: v,
}))
resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{}))
if errors.Is(err, context.DeadlineExceeded) {
log.Trace("deadline exceeded")
err = nil
}
if err != nil {
if errors.Is(err, context.Canceled) {
log.WithError(err).Debugf("shutdown, fetch task canceled")
} else {
log.WithError(err).Error("failed to fetch task")
}
log.WithError(err).Error("failed to fetch task")
return nil, false
}
if resp == nil || resp.Msg == nil {
if resp == nil || resp.Msg == nil || resp.Msg.Task == nil {
return nil, false
}
if resp.Msg.TasksVersion > v {
p.tasksVersion.CompareAndSwap(v, resp.Msg.TasksVersion)
}
if resp.Msg.Task == nil {
return nil, false
}
// got a task, set `tasksVersion` to zero to force querying the db in the next request.
p.tasksVersion.CompareAndSwap(resp.Msg.TasksVersion, 0)
return resp.Msg.Task, true
}
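For orientation, a minimal sketch of how the Forgejo-side poller above is meant to be wired: `Poll` blocks until shutdown is requested, and `Shutdown` stops polling first, then waits for in-flight jobs until the context expires. The `cmd` package placement, the `stop` channel, and the `internal/app/poll` import path are assumptions based on the package layout visible in this diff.

```go
package cmd

import (
	"context"

	"gitea.com/gitea/act_runner/internal/app/poll"
	"gitea.com/gitea/act_runner/internal/app/run"
	"gitea.com/gitea/act_runner/internal/pkg/client"
	"gitea.com/gitea/act_runner/internal/pkg/config"
)

// runWithGracefulShutdown is a sketch, not the daemon itself: it starts the
// poller and, once stop is signalled, gives running jobs up to
// cfg.Runner.ShutdownTimeout before they are cancelled.
func runWithGracefulShutdown(cfg *config.Config, cli client.Client, runner run.RunnerInterface, stop <-chan struct{}) error {
	p := poll.New(cfg, cli, runner)

	go p.Poll() // fetch and execute tasks until Shutdown is called

	<-stop // e.g. closed by a SIGTERM/SIGINT handler

	ctx, cancel := context.WithTimeout(context.Background(), cfg.Runner.ShutdownTimeout)
	defer cancel()
	// Stop polling, then wait for running jobs; if ctx expires, the remaining
	// jobs are cancelled and ctx.Err() is returned.
	return p.Shutdown(ctx)
}
```

The split between the polling context and the jobs context in the code above is what allows a TERM signal to stop fetching new tasks while still giving running jobs the configured `shutdown_timeout` to finish.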


@ -1,263 +0,0 @@
// Copyright The Forgejo Authors.
// SPDX-License-Identifier: MIT
package poll
import (
"context"
"fmt"
"testing"
"time"
"connectrpc.com/connect"
"code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"code.gitea.io/actions-proto-go/runner/v1/runnerv1connect"
"gitea.com/gitea/act_runner/internal/pkg/config"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
type mockPoller struct {
poller
}
func (o *mockPoller) Poll() {
o.poller.Poll()
}
type mockClient struct {
pingv1connect.PingServiceClient
runnerv1connect.RunnerServiceClient
sleep time.Duration
cancel bool
err error
noTask bool
}
func (o mockClient) Address() string {
return ""
}
func (o mockClient) Insecure() bool {
return true
}
func (o *mockClient) FetchTask(ctx context.Context, req *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error) {
if o.sleep > 0 {
select {
case <-ctx.Done():
log.Trace("fetch task done")
return nil, context.DeadlineExceeded
case <-time.After(o.sleep):
log.Trace("slept")
return nil, fmt.Errorf("unexpected")
}
}
if o.cancel {
return nil, context.Canceled
}
if o.err != nil {
return nil, o.err
}
task := &runnerv1.Task{}
if o.noTask {
task = nil
o.noTask = false
}
return connect.NewResponse(&runnerv1.FetchTaskResponse{
Task: task,
TasksVersion: int64(1),
}), nil
}
type mockRunner struct {
cfg *config.Runner
log chan string
panics bool
err error
}
func (o *mockRunner) Run(ctx context.Context, task *runnerv1.Task) error {
o.log <- "runner starts"
if o.panics {
log.Trace("panics")
o.log <- "runner panics"
o.panics = false
panic("whatever")
}
if o.err != nil {
log.Trace("error")
o.log <- "runner error"
err := o.err
o.err = nil
return err
}
for {
select {
case <-ctx.Done():
log.Trace("shutdown")
o.log <- "runner shutdown"
return nil
case <-time.After(o.cfg.Timeout):
log.Trace("after")
o.log <- "runner timeout"
return nil
}
}
}
func setTrace(t *testing.T) {
t.Helper()
log.SetReportCaller(true)
log.SetLevel(log.TraceLevel)
}
func TestPoller_New(t *testing.T) {
p := New(&config.Config{}, &mockClient{}, &mockRunner{})
assert.NotNil(t, p)
}
func TestPoller_Runner(t *testing.T) {
setTrace(t)
for _, testCase := range []struct {
name string
timeout time.Duration
noTask bool
panics bool
err error
expected string
contextTimeout time.Duration
}{
{
name: "Simple",
timeout: 10 * time.Second,
expected: "runner shutdown",
},
{
name: "Panics",
timeout: 10 * time.Second,
panics: true,
expected: "runner panics",
},
{
name: "Error",
timeout: 10 * time.Second,
err: fmt.Errorf("ERROR"),
expected: "runner error",
},
{
name: "PollTaskError",
timeout: 10 * time.Second,
noTask: true,
expected: "runner shutdown",
},
{
name: "ShutdownTimeout",
timeout: 1 * time.Second,
contextTimeout: 1 * time.Minute,
expected: "runner timeout",
},
} {
t.Run(testCase.name, func(t *testing.T) {
runnerLog := make(chan string, 3)
configRunner := config.Runner{
FetchInterval: 1,
Capacity: 1,
Timeout: testCase.timeout,
}
p := &mockPoller{}
p.init(
&config.Config{
Runner: configRunner,
},
&mockClient{
noTask: testCase.noTask,
},
&mockRunner{
cfg: &configRunner,
log: runnerLog,
panics: testCase.panics,
err: testCase.err,
})
go p.Poll()
assert.Equal(t, "runner starts", <-runnerLog)
var ctx context.Context
var cancel context.CancelFunc
if testCase.contextTimeout > 0 {
ctx, cancel = context.WithTimeout(context.Background(), testCase.contextTimeout)
defer cancel()
} else {
ctx, cancel = context.WithCancel(context.Background())
cancel()
}
p.Shutdown(ctx)
<-p.done
assert.Equal(t, testCase.expected, <-runnerLog)
})
}
}
func TestPoller_Fetch(t *testing.T) {
setTrace(t)
for _, testCase := range []struct {
name string
noTask bool
sleep time.Duration
err error
cancel bool
success bool
}{
{
name: "Success",
success: true,
},
{
name: "Timeout",
sleep: 100 * time.Millisecond,
},
{
name: "Canceled",
cancel: true,
},
{
name: "NoTask",
noTask: true,
},
{
name: "Error",
err: fmt.Errorf("random error"),
},
} {
t.Run(testCase.name, func(t *testing.T) {
configRunner := config.Runner{
FetchTimeout: 1 * time.Millisecond,
}
p := &mockPoller{}
p.init(
&config.Config{
Runner: configRunner,
},
&mockClient{
sleep: testCase.sleep,
cancel: testCase.cancel,
noTask: testCase.noTask,
err: testCase.err,
},
&mockRunner{},
)
task, ok := p.fetchTask(context.Background())
if testCase.success {
assert.True(t, ok)
assert.NotNil(t, task)
} else {
assert.False(t, ok)
assert.Nil(t, task)
}
})
}
}


@ -13,14 +13,12 @@ import (
"time"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"connectrpc.com/connect"
"github.com/docker/docker/api/types/container"
"github.com/nektos/act/pkg/artifactcache"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
"github.com/nektos/act/pkg/runner"
log "github.com/sirupsen/logrus"
"gitea.com/gitea/act_runner/internal/app/artifactcache"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/labels"
@ -41,10 +39,6 @@ type Runner struct {
runningTasks sync.Map
}
type RunnerInterface interface {
Run(ctx context.Context, task *runnerv1.Task) error
}
func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) *Runner {
ls := labels.Labels{}
for _, v := range reg.Labels {
@ -64,28 +58,18 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client)
envs[k] = v
}
if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled {
if cfg.Cache.ExternalServer != "" {
envs["ACTIONS_CACHE_URL"] = cfg.Cache.ExternalServer
cacheHandler, err := artifactcache.StartHandler(cfg.Cache.Dir, cfg.Cache.Host, cfg.Cache.Port)
if err != nil {
log.Errorf("cannot init cache server, it will be disabled: %v", err)
// go on
} else {
cacheHandler, err := artifactcache.StartHandler(
cfg.Cache.Dir,
cfg.Cache.Host,
cfg.Cache.Port,
log.StandardLogger().WithField("module", "cache_request"),
)
if err != nil {
log.Errorf("cannot init cache server, it will be disabled: %v", err)
// go on
} else {
envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/"
}
envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/"
}
}
// set artifact gitea api
artifactGiteaAPI := strings.TrimSuffix(cli.Address(), "/") + "/api/actions_pipeline/"
envs["ACTIONS_RUNTIME_URL"] = artifactGiteaAPI
envs["ACTIONS_RESULTS_URL"] = strings.TrimSuffix(cli.Address(), "/")
// Set specific environment variables to distinguish between Gitea and GitHub
envs["GITEA_ACTIONS"] = "true"
@ -103,13 +87,14 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client)
func (r *Runner) Run(ctx context.Context, task *runnerv1.Task) error {
if _, ok := r.runningTasks.Load(task.Id); ok {
return fmt.Errorf("task %d is already running", task.Id)
} else {
r.runningTasks.Store(task.Id, struct{}{})
defer r.runningTasks.Delete(task.Id)
}
r.runningTasks.Store(task.Id, struct{}{})
defer r.runningTasks.Delete(task.Id)
ctx, cancel := context.WithTimeout(ctx, r.cfg.Runner.Timeout)
defer cancel()
reporter := report.NewReporter(ctx, cancel, r.client, task, r.cfg.Runner.ReportInterval)
reporter := report.NewReporter(ctx, cancel, r.client, task)
var runErr error
defer func() {
lastWords := ""
@ -174,12 +159,8 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
preset.Token = t
}
giteaRuntimeToken := taskContext["gitea_runtime_token"].GetStringValue()
if giteaRuntimeToken == "" {
// use the task token as the actions API token for older Gitea server versions
giteaRuntimeToken = preset.Token
}
r.envs["ACTIONS_RUNTIME_TOKEN"] = giteaRuntimeToken
// use the task token as the actions API token
r.envs["ACTIONS_RUNTIME_TOKEN"] = preset.Token
eventJSON, err := json.Marshal(preset.Event)
if err != nil {
@ -191,45 +172,32 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
maxLifetime = time.Until(deadline)
}
var inputs map[string]string
if preset.EventName == "workflow_dispatch" {
if inputsRaw, ok := preset.Event["inputs"]; ok {
inputs, _ = inputsRaw.(map[string]string)
}
}
runnerConfig := &runner.Config{
// On Linux, Workdir will be like "/<parent_directory>/<owner>/<repo>"
// On Windows, Workdir will be like "\<parent_directory>\<owner>\<repo>"
Workdir: filepath.FromSlash(filepath.Clean(fmt.Sprintf("/%s/%s", r.cfg.Container.WorkdirParent, preset.Repository))),
BindWorkdir: false,
ActionCacheDir: filepath.FromSlash(r.cfg.Host.WorkdirParent),
Workdir: filepath.FromSlash(fmt.Sprintf("/%s/%s", r.cfg.Container.WorkdirParent, preset.Repository)),
BindWorkdir: false,
ReuseContainers: false,
ForcePull: r.cfg.Container.ForcePull,
ForceRebuild: false,
LogOutput: true,
JSONLogger: false,
Env: r.envs,
Secrets: task.Secrets,
GitHubInstance: strings.TrimSuffix(r.client.Address(), "/"),
AutoRemove: true,
NoSkipCheckout: true,
PresetGitHubContext: preset,
EventJSON: string(eventJSON),
ContainerNamePrefix: fmt.Sprintf("GITEA-ACTIONS-TASK-%d", task.Id),
ContainerMaxLifetime: maxLifetime,
ContainerNetworkMode: container.NetworkMode(r.cfg.Container.Network),
ContainerNetworkEnableIPv6: r.cfg.Container.EnableIPv6,
ContainerOptions: r.cfg.Container.Options,
ContainerDaemonSocket: r.cfg.Container.DockerHost,
Privileged: r.cfg.Container.Privileged,
DefaultActionInstance: taskContext["gitea_default_actions_url"].GetStringValue(),
PlatformPicker: r.labels.PickPlatform,
Vars: task.Vars,
ValidVolumes: r.cfg.Container.ValidVolumes,
InsecureSkipTLS: r.cfg.Runner.Insecure,
Inputs: inputs,
ReuseContainers: false,
ForcePull: false,
ForceRebuild: false,
LogOutput: true,
JSONLogger: false,
Env: r.envs,
Secrets: task.Secrets,
GitHubInstance: r.client.Address(),
AutoRemove: true,
NoSkipCheckout: true,
PresetGitHubContext: preset,
EventJSON: string(eventJSON),
ContainerNamePrefix: fmt.Sprintf("GITEA-ACTIONS-TASK-%d", task.Id),
ContainerMaxLifetime: maxLifetime,
ContainerNetworkMode: r.cfg.Container.NetworkMode,
ContainerOptions: r.cfg.Container.Options,
Privileged: r.cfg.Container.Privileged,
DefaultActionInstance: taskContext["gitea_default_actions_url"].GetStringValue(),
PlatformPicker: r.labels.PickPlatform,
Vars: task.Vars,
}
rr, err := runner.New(runnerConfig)
@ -247,14 +215,3 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
reporter.SetOutputs(job.Outputs)
return execErr
}
func (r *Runner) Declare(ctx context.Context, labels []string) (*connect.Response[runnerv1.DeclareResponse], error) {
return r.client.Declare(ctx, connect.NewRequest(&runnerv1.DeclareRequest{
Version: ver.Version(),
Labels: labels,
}))
}
func (r *Runner) Update(ctx context.Context, labels labels.Labels) {
r.labels = labels
}
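The `Declare` and `Update` helpers added at the end of this file are not exercised anywhere in this diff; the following is a hedged sketch of how a daemon could use them. The `cmd` package placement and the `internal/app/run` import path are assumptions, and the `DeclareResponse` body is deliberately ignored because its shape is not shown here.

```go
package cmd

import (
	"context"

	log "github.com/sirupsen/logrus"

	"gitea.com/gitea/act_runner/internal/app/run"
	"gitea.com/gitea/act_runner/internal/pkg/labels"
)

// declareLabels is a sketch: ls holds the labels parsed from the runner
// configuration, and the daemon declares its version and labels on startup.
func declareLabels(ctx context.Context, runner *run.Runner, ls labels.Labels) {
	if _, err := runner.Declare(ctx, ls.Names()); err != nil {
		// Older instances may not implement Declare; in that case the runner
		// keeps the labels recorded at registration time.
		log.WithError(err).Warn("cannot declare the runner version and labels")
		return
	}
	runner.Update(ctx, ls) // adopt the labels that were just declared
}
```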


@ -1,37 +0,0 @@
package run
import (
"context"
"testing"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"github.com/stretchr/testify/assert"
)
func TestLabelUpdate(t *testing.T) {
ctx := context.Background()
ls := labels.Labels{}
initialLabel, err := labels.Parse("testlabel:docker://alpine")
assert.NoError(t, err)
ls = append(ls, initialLabel)
newLs := labels.Labels{}
newLabel, err := labels.Parse("next label:host")
assert.NoError(t, err)
newLs = append(newLs, initialLabel)
newLs = append(newLs, newLabel)
runner := Runner{
labels: ls,
}
assert.Contains(t, runner.labels, initialLabel)
assert.NotContains(t, runner.labels, newLabel)
runner.Update(ctx, newLs)
assert.Contains(t, runner.labels, initialLabel)
assert.Contains(t, runner.labels, newLabel)
}


@ -19,7 +19,7 @@ func Test_generateWorkflow(t *testing.T) {
tests := []struct {
name string
args args
assert func(t *testing.T, wf *model.Workflow, err error)
assert func(t *testing.T, wf *model.Workflow)
want1 string
wantErr bool
}{
@ -56,41 +56,19 @@ jobs:
},
},
},
assert: func(t *testing.T, wf *model.Workflow, err error) {
assert: func(t *testing.T, wf *model.Workflow) {
assert.DeepEqual(t, wf.GetJob("job9").Needs(), []string{"job1", "job2"})
},
want1: "job9",
wantErr: false,
},
{
name: "valid YAML syntax in top level env but wrong value type",
args: args{
task: &runnerv1.Task{
WorkflowPayload: []byte(`
on: push
env:
value: {{ }}
`),
},
},
assert: func(t *testing.T, wf *model.Workflow, err error) {
require.Nil(t, wf)
assert.ErrorContains(t, err, "cannot unmarshal")
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, got1, err := generateWorkflow(tt.args.task)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
assert.Equal(t, got1, tt.want1)
}
tt.assert(t, got, err)
require.NoError(t, err)
tt.assert(t, got)
assert.Equal(t, got1, tt.want1)
})
}
}


@ -9,8 +9,6 @@ import (
)
// A Client manages communication with the runner.
//
//go:generate mockery --name Client
type Client interface {
pingv1connect.PingServiceClient
runnerv1connect.RunnerServiceClient


@ -4,8 +4,7 @@
package client
const (
UUIDHeader = "x-runner-uuid"
TokenHeader = "x-runner-token"
// Deprecated: could be removed after Gitea 1.20 released
UUIDHeader = "x-runner-uuid"
TokenHeader = "x-runner-token"
VersionHeader = "x-runner-version"
)


@ -11,10 +11,10 @@ import (
"code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
"code.gitea.io/actions-proto-go/runner/v1/runnerv1connect"
"connectrpc.com/connect"
"github.com/bufbuild/connect-go"
)
func getHTTPClient(endpoint string, insecure bool) *http.Client {
func getHttpClient(endpoint string, insecure bool) *http.Client {
if strings.HasPrefix(endpoint, "https://") && insecure {
return &http.Client{
Transport: &http.Transport{
@ -39,7 +39,6 @@ func New(endpoint string, insecure bool, uuid, token, version string, opts ...co
if token != "" {
req.Header().Set(TokenHeader, token)
}
// TODO: version will be removed from request header after Gitea 1.20 released.
if version != "" {
req.Header().Set(VersionHeader, version)
}
@ -49,12 +48,12 @@ func New(endpoint string, insecure bool, uuid, token, version string, opts ...co
return &HTTPClient{
PingServiceClient: pingv1connect.NewPingServiceClient(
getHTTPClient(endpoint, insecure),
getHttpClient(endpoint, insecure),
baseURL,
opts...,
),
RunnerServiceClient: runnerv1connect.NewRunnerServiceClient(
getHTTPClient(endpoint, insecure),
getHttpClient(endpoint, insecure),
baseURL,
opts...,
),


@ -1,219 +0,0 @@
// Code generated by mockery v2.26.1. DO NOT EDIT.
package mocks
import (
context "context"
connect "connectrpc.com/connect"
mock "github.com/stretchr/testify/mock"
pingv1 "code.gitea.io/actions-proto-go/ping/v1"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
)
// Client is an autogenerated mock type for the Client type
type Client struct {
mock.Mock
}
// Address provides a mock function with given fields:
func (_m *Client) Address() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// Declare provides a mock function with given fields: _a0, _a1
func (_m *Client) Declare(_a0 context.Context, _a1 *connect.Request[runnerv1.DeclareRequest]) (*connect.Response[runnerv1.DeclareResponse], error) {
ret := _m.Called(_a0, _a1)
var r0 *connect.Response[runnerv1.DeclareResponse]
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.DeclareRequest]) (*connect.Response[runnerv1.DeclareResponse], error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.DeclareRequest]) *connect.Response[runnerv1.DeclareResponse]); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*connect.Response[runnerv1.DeclareResponse])
}
}
if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.DeclareRequest]) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// FetchTask provides a mock function with given fields: _a0, _a1
func (_m *Client) FetchTask(_a0 context.Context, _a1 *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error) {
ret := _m.Called(_a0, _a1)
var r0 *connect.Response[runnerv1.FetchTaskResponse]
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) *connect.Response[runnerv1.FetchTaskResponse]); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*connect.Response[runnerv1.FetchTaskResponse])
}
}
if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Insecure provides a mock function with given fields:
func (_m *Client) Insecure() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// Ping provides a mock function with given fields: _a0, _a1
func (_m *Client) Ping(_a0 context.Context, _a1 *connect.Request[pingv1.PingRequest]) (*connect.Response[pingv1.PingResponse], error) {
ret := _m.Called(_a0, _a1)
var r0 *connect.Response[pingv1.PingResponse]
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[pingv1.PingRequest]) (*connect.Response[pingv1.PingResponse], error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[pingv1.PingRequest]) *connect.Response[pingv1.PingResponse]); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*connect.Response[pingv1.PingResponse])
}
}
if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[pingv1.PingRequest]) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Register provides a mock function with given fields: _a0, _a1
func (_m *Client) Register(_a0 context.Context, _a1 *connect.Request[runnerv1.RegisterRequest]) (*connect.Response[runnerv1.RegisterResponse], error) {
ret := _m.Called(_a0, _a1)
var r0 *connect.Response[runnerv1.RegisterResponse]
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) (*connect.Response[runnerv1.RegisterResponse], error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) *connect.Response[runnerv1.RegisterResponse]); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*connect.Response[runnerv1.RegisterResponse])
}
}
if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateLog provides a mock function with given fields: _a0, _a1
func (_m *Client) UpdateLog(_a0 context.Context, _a1 *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error) {
ret := _m.Called(_a0, _a1)
var r0 *connect.Response[runnerv1.UpdateLogResponse]
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) *connect.Response[runnerv1.UpdateLogResponse]); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*connect.Response[runnerv1.UpdateLogResponse])
}
}
if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// UpdateTask provides a mock function with given fields: _a0, _a1
func (_m *Client) UpdateTask(_a0 context.Context, _a1 *connect.Request[runnerv1.UpdateTaskRequest]) (*connect.Response[runnerv1.UpdateTaskResponse], error) {
ret := _m.Called(_a0, _a1)
var r0 *connect.Response[runnerv1.UpdateTaskResponse]
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) (*connect.Response[runnerv1.UpdateTaskResponse], error)); ok {
return rf(_a0, _a1)
}
if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) *connect.Response[runnerv1.UpdateTaskResponse]); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*connect.Response[runnerv1.UpdateTaskResponse])
}
}
if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewClient interface {
mock.TestingT
Cleanup(func())
}
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t mockConstructorTestingTNewClient) *Client {
mock := &Client{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@ -1,8 +1,5 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
@ -20,28 +17,15 @@ runner:
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Forgejo instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if its timeout is shorter than this.
timeout: 3h
# The timeout for the runner to wait for running jobs to finish when
# shutting down because a TERM or INT signal has been received. Any
# running jobs that haven't finished after this timeout will be
# cancelled.
# If unset or zero the jobs will be cancelled immediately.
shutdown_timeout: 3h
# Whether to skip verifying the TLS certificate of the instance.
# Whether to skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Forgejo instance.
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Forgejo instance.
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The interval for reporting the job status and logs to the Forgejo instance.
report_interval: 1s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: ["macos-arm64:host", "ubuntu-latest:docker://node:20-bookworm", "ubuntu-22.04:docker://node:20-bookworm"]
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `deamon`, will use labels in `.runner` file.
labels: []
cache:
# Enable cache server to use actions/cache.
@ -56,45 +40,14 @@ cache:
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, create a network automatically.
network: ""
# Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
# Only takes effect if "network" is set to "".
enable_ipv6: false
# Which network to use for the job containers. Could be bridge, host, none, or the name of a custom network.
network_mode: bridge
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.forgejo.url:host-gateway).
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:


@ -10,74 +10,35 @@ import (
"time"
"github.com/joho/godotenv"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
)
// Log represents the configuration for logging.
type Log struct {
Level string `yaml:"level"` // Level indicates the logging level.
}
// Runner represents the configuration for the runner.
type Runner struct {
File string `yaml:"file"` // File specifies the file path for the runner.
Capacity int `yaml:"capacity"` // Capacity specifies the capacity of the runner.
Envs map[string]string `yaml:"envs"` // Envs stores environment variables for the runner.
EnvFile string `yaml:"env_file"` // EnvFile specifies the path to the file containing environment variables for the runner.
Timeout time.Duration `yaml:"timeout"` // Timeout specifies the duration for runner timeout.
ShutdownTimeout time.Duration `yaml:"shutdown_timeout"` // ShutdownTimeout specifies the duration to wait for running jobs to complete during a shutdown of the runner.
Insecure bool `yaml:"insecure"` // Insecure indicates whether the runner operates in an insecure mode.
FetchTimeout time.Duration `yaml:"fetch_timeout"` // FetchTimeout specifies the timeout duration for fetching resources.
FetchInterval time.Duration `yaml:"fetch_interval"` // FetchInterval specifies the interval duration for fetching resources.
ReportInterval time.Duration `yaml:"report_interval"` // ReportInterval specifies the interval duration for reporting status and logs of a running job.
Labels []string `yaml:"labels"` // Labels specify the labels of the runner. Labels are declared on each startup
}
// Cache represents the configuration for caching.
type Cache struct {
Enabled *bool `yaml:"enabled"` // Enabled indicates whether caching is enabled. It is a pointer to distinguish between false and not set. If not set, it will be true.
Dir string `yaml:"dir"` // Dir specifies the directory path for caching.
Host string `yaml:"host"` // Host specifies the caching host.
Port uint16 `yaml:"port"` // Port specifies the caching port.
ExternalServer string `yaml:"external_server"` // ExternalServer specifies the URL of external cache server
}
// Container represents the configuration for the container.
type Container struct {
Network string `yaml:"network"` // Network specifies the network for the container.
NetworkMode string `yaml:"network_mode"` // Deprecated: use Network instead. Could be removed after Gitea 1.20
EnableIPv6 bool `yaml:"enable_ipv6"` // EnableIPv6 indicates whether the network is created with IPv6 enabled.
Privileged bool `yaml:"privileged"` // Privileged indicates whether the container runs in privileged mode.
Options string `yaml:"options"` // Options specifies additional options for the container.
WorkdirParent string `yaml:"workdir_parent"` // WorkdirParent specifies the parent directory for the container's working directory.
ValidVolumes []string `yaml:"valid_volumes"` // ValidVolumes specifies the volumes (including bind mounts) can be mounted to containers.
DockerHost string `yaml:"docker_host"` // DockerHost specifies the Docker host. It overrides the value specified in environment variable DOCKER_HOST.
ForcePull bool `yaml:"force_pull"` // Pull docker image(s) even if already present
}
// Host represents the configuration for the host.
type Host struct {
WorkdirParent string `yaml:"workdir_parent"` // WorkdirParent specifies the parent directory for the host's working directory.
}
// Config represents the overall configuration.
type Config struct {
Log Log `yaml:"log"` // Log represents the configuration for logging.
Runner Runner `yaml:"runner"` // Runner represents the configuration for the runner.
Cache Cache `yaml:"cache"` // Cache represents the configuration for caching.
Container Container `yaml:"container"` // Container represents the configuration for the container.
Host Host `yaml:"host"` // Host represents the configuration for the host.
}
// Tune the config settings according to the Forgejo instance that will be used.
func (c *Config) Tune(instanceURL string) {
if instanceURL == "https://codeberg.org" {
if c.Runner.FetchInterval < 30*time.Second {
log.Info("The runner is configured to be used by a public instance, fetch interval is set to 30 seconds.")
c.Runner.FetchInterval = 30 * time.Second
}
}
Log struct {
Level string `yaml:"level"`
} `yaml:"log"`
Runner struct {
File string `yaml:"file"`
Capacity int `yaml:"capacity"`
Envs map[string]string `yaml:"envs"`
EnvFile string `yaml:"env_file"`
Timeout time.Duration `yaml:"timeout"`
Insecure bool `yaml:"insecure"`
FetchTimeout time.Duration `yaml:"fetch_timeout"`
FetchInterval time.Duration `yaml:"fetch_interval"`
} `yaml:"runner"`
Cache struct {
Enabled *bool `yaml:"enabled"` // pointer to distinguish between false and not set, and it will be true if not set
Dir string `yaml:"dir"`
Host string `yaml:"host"`
Port uint16 `yaml:"port"`
} `yaml:"cache"`
Container struct {
NetworkMode string `yaml:"network_mode"`
Privileged bool `yaml:"privileged"`
Options string `yaml:"options"`
WorkdirParent string `yaml:"workdir_parent"`
} `yaml:"container"`
}
// LoadDefault returns the default configuration.
@ -85,12 +46,14 @@ func (c *Config) Tune(instanceURL string) {
func LoadDefault(file string) (*Config, error) {
cfg := &Config{}
if file != "" {
content, err := os.ReadFile(file)
f, err := os.Open(file)
if err != nil {
return nil, fmt.Errorf("open config file %q: %w", file, err)
return nil, err
}
if err := yaml.Unmarshal(content, cfg); err != nil {
return nil, fmt.Errorf("parse config file %q: %w", file, err)
defer f.Close()
decoder := yaml.NewDecoder(f)
if err := decoder.Decode(&cfg); err != nil {
return nil, err
}
}
compatibleWithOldEnvs(file != "", cfg)
@ -101,9 +64,6 @@ func LoadDefault(file string) (*Config, error) {
if err != nil {
return nil, fmt.Errorf("read env file %q: %w", cfg.Runner.EnvFile, err)
}
if cfg.Runner.Envs == nil {
cfg.Runner.Envs = map[string]string{}
}
for k, v := range envs {
cfg.Runner.Envs[k] = v
}
@ -132,35 +92,18 @@ func LoadDefault(file string) (*Config, error) {
cfg.Cache.Dir = filepath.Join(home, ".cache", "actcache")
}
}
if cfg.Container.NetworkMode == "" {
cfg.Container.NetworkMode = "bridge"
}
if cfg.Container.WorkdirParent == "" {
cfg.Container.WorkdirParent = "workspace"
}
if cfg.Host.WorkdirParent == "" {
home, _ := os.UserHomeDir()
cfg.Host.WorkdirParent = filepath.Join(home, ".cache", "act")
}
if cfg.Runner.FetchTimeout <= 0 {
cfg.Runner.FetchTimeout = 5 * time.Second
}
if cfg.Runner.FetchInterval <= 0 {
cfg.Runner.FetchInterval = 2 * time.Second
}
if cfg.Runner.ReportInterval <= 0 {
cfg.Runner.ReportInterval = time.Second
}
// Although `container.network_mode` will be deprecated, we have to stay compatible with it for now.
if cfg.Container.NetworkMode != "" && cfg.Container.Network == "" {
log.Warn("You are trying to use deprecated configuration item of `container.network_mode`, please use `container.network` instead.")
if cfg.Container.NetworkMode == "bridge" {
// Previously, if the value of `container.network_mode` was `bridge`, we would create a new network for the job.
// But "bridge" is easily confused with the bridge network created by Docker by default.
// So we set the value of `container.network` to an empty string to make `act_runner` automatically create a new network for the job.
cfg.Container.Network = ""
} else {
cfg.Container.Network = cfg.Container.NetworkMode
}
}
return cfg, nil
}
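A small sketch of how the configuration code above is typically consumed; `LoadDefault` and `Tune` are taken directly from this diff, while the surrounding function and its package placement are illustrative.

```go
package cmd

import (
	"fmt"

	"gitea.com/gitea/act_runner/internal/pkg/config"
)

// loadConfig is a sketch: LoadDefault applies the defaults described in this
// diff (fetch timeout, fetch interval, report interval, workdir parents, ...)
// and Tune relaxes the fetch interval when the runner targets a busy public
// instance.
func loadConfig(file, instanceURL string) (*config.Config, error) {
	cfg, err := config.LoadDefault(file)
	if err != nil {
		return nil, fmt.Errorf("invalid configuration: %w", err)
	}
	cfg.Tune(instanceURL) // e.g. "https://codeberg.org" raises FetchInterval to 30s
	return cfg, nil
}
```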


@ -1,37 +0,0 @@
// Copyright 2024 The Forgejo Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package config
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestConfigTune(t *testing.T) {
c := &Config{
Runner: Runner{},
}
t.Run("Public instance tuning", func(t *testing.T) {
c.Runner.FetchInterval = 60 * time.Second
c.Tune("https://codeberg.org")
assert.EqualValues(t, 60*time.Second, c.Runner.FetchInterval)
c.Runner.FetchInterval = 2 * time.Second
c.Tune("https://codeberg.org")
assert.EqualValues(t, 30*time.Second, c.Runner.FetchInterval)
})
t.Run("Non-public instance tuning", func(t *testing.T) {
c.Runner.FetchInterval = 60 * time.Second
c.Tune("https://example.com")
assert.EqualValues(t, 60*time.Second, c.Runner.FetchInterval)
c.Runner.FetchInterval = 2 * time.Second
c.Tune("https://codeberg.com")
assert.EqualValues(t, 2*time.Second, c.Runner.FetchInterval)
})
}


@ -10,16 +10,9 @@ import (
"github.com/docker/docker/client"
)
func CheckIfDockerRunning(ctx context.Context, configDockerHost string) error {
opts := []client.Opt{
client.FromEnv,
}
if configDockerHost != "" {
opts = append(opts, client.WithHost(configDockerHost))
}
cli, err := client.NewClientWithOpts(opts...)
func CheckIfDockerRunning(ctx context.Context) error {
// TODO: if the runner supports configuring docker, we need to pass in config.Config
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return err
}
@ -27,7 +20,7 @@ func CheckIfDockerRunning(ctx context.Context, configDockerHost string) error {
_, err = cli.Ping(ctx)
if err != nil {
return fmt.Errorf("cannot ping the docker daemon. is it running? %w", err)
return fmt.Errorf("cannot ping the docker daemon, does it running? %w", err)
}
return nil


@ -11,7 +11,6 @@ import (
const (
SchemeHost = "host"
SchemeDocker = "docker"
SchemeLXC = "lxc"
)
type Label struct {
@ -33,7 +32,7 @@ func Parse(str string) (*Label, error) {
if len(splits) >= 3 {
label.Arg = splits[2]
}
if label.Schema != SchemeHost && label.Schema != SchemeDocker && label.Schema != SchemeLXC {
if label.Schema != SchemeHost && label.Schema != SchemeDocker {
return nil, fmt.Errorf("unsupported schema: %s", label.Schema)
}
return label, nil
@ -56,11 +55,10 @@ func (l Labels) PickPlatform(runsOn []string) string {
switch label.Schema {
case SchemeDocker:
// "//" will be ignored
// TODO maybe we should use 'ubuntu-18.04:docker:node:16-buster' instead
platforms[label.Name] = strings.TrimPrefix(label.Arg, "//")
case SchemeHost:
platforms[label.Name] = "-self-hosted"
case SchemeLXC:
platforms[label.Name] = "lxc:" + strings.TrimPrefix(label.Arg, "//")
default:
// It should not happen, because Parse has checked it.
continue
@ -82,28 +80,5 @@ func (l Labels) PickPlatform(runsOn []string) string {
// So the runner may receive a task with a label that the runner doesn't have;
// it happens when the user has edited the labels of the runner in the web UI.
// TODO: it may not be correct; what if the runner is used in host mode only?
return "node:20-bullseye"
}
func (l Labels) Names() []string {
names := make([]string, 0, len(l))
for _, label := range l {
names = append(names, label.Name)
}
return names
}
func (l Labels) ToStrings() []string {
ls := make([]string, 0, len(l))
for _, label := range l {
lbl := label.Name
if label.Schema != "" {
lbl += ":" + label.Schema
if label.Arg != "" {
lbl += ":" + label.Arg
}
}
ls = append(ls, lbl)
}
return ls
return "node:16-bullseye"
}
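To make the label changes above concrete, here is a hedged example of the three schemes accepted by `Parse` on the Forgejo side (docker, host, and the new lxc) and what `PickPlatform` is expected to return for them. The label strings and the expected outputs are illustrative, derived from the mapping shown above; the `package main` wrapper assumes the snippet lives inside this repository.

```go
package main

import (
	"fmt"
	"log"

	"gitea.com/gitea/act_runner/internal/pkg/labels"
)

func main() {
	ls := labels.Labels{}
	for _, s := range []string{
		"ubuntu-22.04:docker://node:20-bookworm", // job runs in a Docker container
		"self-hosted:host",                       // job runs directly on the host
		"bookworm:lxc://debian:bookworm",         // job runs in an LXC container
	} {
		l, err := labels.Parse(s)
		if err != nil {
			log.Fatal(err)
		}
		ls = append(ls, l)
	}
	fmt.Println(ls.PickPlatform([]string{"ubuntu-22.04"})) // expected: node:20-bookworm
	fmt.Println(ls.PickPlatform([]string{"self-hosted"}))  // expected: -self-hosted
	fmt.Println(ls.PickPlatform([]string{"bookworm"}))     // expected: lxc:debian:bookworm
}
```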


@ -55,8 +55,9 @@ func TestParse(t *testing.T) {
if tt.wantErr {
require.Error(t, err)
return
} else {
require.NoError(t, err)
}
require.NoError(t, err)
assert.DeepEqual(t, got, tt.want)
})
}


@ -12,8 +12,8 @@ import (
"time"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"connectrpc.com/connect"
retry "github.com/avast/retry-go/v4"
"github.com/bufbuild/connect-go"
log "github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
@ -29,11 +29,10 @@ type Reporter struct {
client client.Client
clientM sync.Mutex
logOffset int
logRows []*runnerv1.LogRow
logReplacer *strings.Replacer
oldnew []string
reportInterval time.Duration
logOffset int
logRows []*runnerv1.LogRow
logReplacer *strings.Replacer
oldnew []string
state *runnerv1.TaskState
stateMu sync.RWMutex
@ -43,25 +42,21 @@ type Reporter struct {
stopCommandEndToken string
}
func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.Client, task *runnerv1.Task, reportInterval time.Duration) *Reporter {
func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.Client, task *runnerv1.Task) *Reporter {
var oldnew []string
if v := task.Context.Fields["token"].GetStringValue(); v != "" {
oldnew = append(oldnew, v, "***")
}
if v := task.Context.Fields["gitea_runtime_token"].GetStringValue(); v != "" {
oldnew = append(oldnew, v, "***")
}
for _, v := range task.Secrets {
oldnew = append(oldnew, v, "***")
}
rv := &Reporter{
ctx: ctx,
cancel: cancel,
client: client,
oldnew: oldnew,
reportInterval: reportInterval,
logReplacer: strings.NewReplacer(oldnew...),
ctx: ctx,
cancel: cancel,
client: client,
oldnew: oldnew,
logReplacer: strings.NewReplacer(oldnew...),
state: &runnerv1.TaskState{
Id: task.Id,
},
@ -116,9 +111,6 @@ func (r *Reporter) Fire(entry *log.Entry) error {
for _, s := range r.state.Steps {
if s.Result == runnerv1.Result_RESULT_UNSPECIFIED {
s.Result = runnerv1.Result_RESULT_CANCELLED
if jobResult == runnerv1.Result_RESULT_SKIPPED {
s.Result = runnerv1.Result_RESULT_SKIPPED
}
}
}
}
@ -147,13 +139,11 @@ func (r *Reporter) Fire(entry *log.Entry) error {
}
if v, ok := entry.Data["raw_output"]; ok {
if rawOutput, ok := v.(bool); ok && rawOutput {
if row := r.parseLogRow(entry); row != nil {
if step.LogLength == 0 {
step.LogIndex = int64(r.logOffset + len(r.logRows))
}
step.LogLength++
r.logRows = append(r.logRows, row)
if step.LogLength == 0 {
step.LogIndex = int64(r.logOffset + len(r.logRows))
}
step.LogLength++
r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
}
} else if !r.duringSteps() {
r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
@ -182,7 +172,7 @@ func (r *Reporter) RunDaemon() {
_ = r.ReportLog(false)
_ = r.ReportState()
time.AfterFunc(r.reportInterval, r.RunDaemon)
time.AfterFunc(time.Second, r.RunDaemon)
}
func (r *Reporter) Logf(format string, a ...interface{}) {
@ -394,13 +384,12 @@ func (r *Reporter) handleCommand(originalContent, command, parameters, value str
// Not implemented yet, so just return the original content.
return &originalContent
case "group":
// Rewriting into ##[] syntax which the frontend understands
content := "##[group]" + value
return &content
// Returning the original content, because I think the frontend
// will use it when rendering the output.
return &originalContent
case "endgroup":
// Ditto
content := "##[endgroup]"
return &content
return &originalContent
case "stop-commands":
r.stopCommandEndToken = value
return nil
@ -427,7 +416,7 @@ func (r *Reporter) parseLogRow(entry *log.Entry) *runnerv1.LogRow {
return &runnerv1.LogRow{
Time: timestamppb.New(entry.Time),
Content: strings.ToValidUTF8(content, "?"),
Content: content,
}
}
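The secret handling above (pairing every secret value, and the task token, with `***` and building a `strings.Replacer` that is applied to each log row before it is reported) can be reproduced standalone. This sketch only illustrates the masking idea, not the Reporter itself; the sample secret is illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	secrets := map[string]string{"PASSWORD": "s3cr3t"} // illustrative secret
	var oldnew []string
	for _, v := range secrets {
		oldnew = append(oldnew, v, "***") // replace each secret value with ***
	}
	replacer := strings.NewReplacer(oldnew...)
	fmt.Println(replacer.Replace("login succeeded with s3cr3t")) // login succeeded with ***
}
```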


@ -4,20 +4,11 @@
package report
import (
"context"
"strings"
"testing"
"time"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
connect_go "connectrpc.com/connect"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/structpb"
"gitea.com/gitea/act_runner/internal/pkg/client/mocks"
"gotest.tools/v3/assert"
)
func TestReporter_parseLogRow(t *testing.T) {
@ -97,8 +88,8 @@ func TestReporter_parseLogRow(t *testing.T) {
"::endgroup::",
},
[]string{
"##[group]",
"##[endgroup]",
"::group::",
"::endgroup::",
},
},
{
@ -155,44 +146,3 @@ func TestReporter_parseLogRow(t *testing.T) {
})
}
}
func TestReporter_Fire(t *testing.T) {
t.Run("ignore command lines", func(t *testing.T) {
client := mocks.NewClient(t)
client.On("UpdateLog", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateLogRequest]) (*connect_go.Response[runnerv1.UpdateLogResponse], error) {
t.Logf("Received UpdateLog: %s", req.Msg.String())
return connect_go.NewResponse(&runnerv1.UpdateLogResponse{
AckIndex: req.Msg.Index + int64(len(req.Msg.Rows)),
}), nil
})
client.On("UpdateTask", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
t.Logf("Received UpdateTask: %s", req.Msg.String())
return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{}), nil
})
ctx, cancel := context.WithCancel(context.Background())
taskCtx, err := structpb.NewStruct(map[string]interface{}{})
require.NoError(t, err)
reporter := NewReporter(ctx, cancel, client, &runnerv1.Task{
Context: taskCtx,
}, time.Second)
defer func() {
assert.NoError(t, reporter.Close(""))
}()
reporter.ResetSteps(5)
dataStep0 := map[string]interface{}{
"stage": "Main",
"stepNumber": 0,
"raw_output": true,
}
assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0}))
assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0}))
assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0}))
assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0}))
assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0}))
assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0}))
assert.Equal(t, int64(3), reporter.state.Steps[0].LogLength)
})
}


@ -1,11 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["local>forgejo/renovate-config"],
"packageRules": [
{
"description": "Disable nektos/act, it's replaced",
"matchDepNames": ["github.com/nektos/act"],
"enabled": false
}
]
}


@ -10,10 +10,6 @@ CONFIG_ARG=""
if [[ ! -z "${CONFIG_FILE}" ]]; then
CONFIG_ARG="--config ${CONFIG_FILE}"
fi
EXTRA_ARGS=""
if [[ ! -z "${GITEA_RUNNER_LABELS}" ]]; then
EXTRA_ARGS="${EXTRA_ARGS} --labels ${GITEA_RUNNER_LABELS}"
fi
# Use the same ENV variable names as https://github.com/vegardit/docker-gitea-act-runner
@ -21,16 +17,19 @@ if [[ ! -s .runner ]]; then
try=$((try + 1))
success=0
# The point of this loop is to make it simple, when running both forgejo-runner and gitea in docker,
# for the forgejo-runner to wait a moment for gitea to become available before erroring out. Within
# The point of this loop is to make it simple, when running both act_runner and gitea in docker,
# for the act_runner to wait a moment for gitea to become available before erroring out. Within
# the context of a single docker-compose, something similar could be done via healthchecks, but
# this is more flexible.
while [[ $success -eq 0 ]] && [[ $try -lt ${GITEA_MAX_REG_ATTEMPTS:-10} ]]; do
forgejo-runner register \
act_runner register \
--instance "${GITEA_INSTANCE_URL}" \
--token "${GITEA_RUNNER_REGISTRATION_TOKEN}" \
--name "${GITEA_RUNNER_NAME:-`hostname`}" \
${CONFIG_ARG} ${EXTRA_ARGS} --no-interactive 2>&1 | tee /tmp/reg.log
--labels "${GITEA_RUNNER_LABELS}" \
${CONFIG_ARG} --no-interactive > /tmp/reg.log 2>&1
cat /tmp/reg.log
cat /tmp/reg.log | grep 'Runner registered successfully' > /dev/null
if [[ $? -eq 0 ]]; then
@ -42,7 +41,5 @@ if [[ ! -s .runner ]]; then
fi
done
fi
# Prevent reading the token from the forgejo-runner process
unset GITEA_RUNNER_REGISTRATION_TOKEN
forgejo-runner daemon ${CONFIG_ARG}
act_runner daemon ${CONFIG_ARG}


@ -1,9 +0,0 @@
#!/usr/bin/env bash
# wait for docker daemon
while ! nc -z localhost 2376 </dev/null; do
echo 'waiting for docker daemon...'
sleep 5
done
. /opt/act/run.sh


@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
logfile=/dev/null
logfile_maxbytes=0
[program:dockerd]
command=/usr/local/bin/dockerd-entrypoint.sh
[program:act_runner]
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
redirect_stderr=true
command=/opt/act/rootless.sh


@ -1,67 +0,0 @@
# Forgejo Runner with systemd User Services
It is possible to use systemd's user services together with
[podman](https://podman.io/) to run `forgejo-runner` under a normal user
account without any privileges and have it start automatically on boot.
This was last tested on Fedora 39 on 2024-02-19, but should work elsewhere as
well.
Place the `forgejo-runner` binary in `/usr/local/bin/forgejo-runner` and make
sure it can be executed (`chmod +x /usr/local/bin/forgejo-runner`).
Install `podman`:
```bash
$ sudo dnf -y install podman
```
You *may* need to reboot your system after installing `podman` as it
modifies some system configuration(s) that may need to be activated. Without
rebooting the system, my runner errored out when trying to set firewall rules;
a reboot fixed it.
Enable `podman` as a user service:
```
$ systemctl --user start podman.socket
$ systemctl --user enable podman.socket
```
Make sure processes remain after your user account logs out:
```bash
$ loginctl enable-linger
```
Create the file `/etc/systemd/user/forgejo-runner.service` with the following
content:
```
[Unit]
Description=Forgejo Runner
[Service]
Type=simple
ExecStart=/usr/local/bin/forgejo-runner daemon
Restart=on-failure
[Install]
WantedBy=default.target
```
Now activate it as a user service:
```bash
$ systemctl --user daemon-reload
$ systemctl --user start forgejo-runner
$ systemctl --user enable forgejo-runner
```
To see/follow the log of `forgejo-runner`:
```bash
$ journalctl -f -t forgejo-runner
```
If you reboot your system, all should come back automatically.